Compare commits
1 Commit
4b557a923c ... foss_stora
| Author | SHA1 | Date |
|---|---|---|
|  | 4989a59ddf |  |
Cargo.lock (generated, 2287 lines changed)
File diff suppressed because it is too large
@@ -11,8 +11,8 @@ metrics-exporter-prometheus = { version = "0.16.2", features=["http-listener"]}
# minio = "0.1.0"
minio = {git="https://github.com/minio/minio-rs.git", rev = "c28f576"}
reqwest = { version = "0.12", features = ["gzip", "default", "rustls-tls"] }
rusqlite = { version = "0.34.0", features = ["bundled"] }
serde = { version = "1.0", features = ["derive"] }
surrealdb = "2.2"
tokio = { version="1.41.0", features = ["full"] }
toml = "0.8.20"
tracing = "0.1"
@@ -3,9 +3,8 @@ surreal_url = "localhost:8000"
surreal_username = "root"
surreal_password = "root"
surreal_ns = "test"
surreal_db = "v1.19.5"
surreal_db = "v1.19.2"

# Crawler config
crawl_filter = "en.wikipedia.org"
start_url = "https://en.wikipedia.org"
budget = 100
crawl_filter = "en.wikipedia.com"
budget = 1000
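The crawler loads these keys at startup via `toml` and `serde` (the `Config` struct and the `toml::from_str` call appear in the `src/main.rs` hunks below). A minimal sketch of that load path, with field names copied from the keys above; whether every key (for example `start_url`) survives this commit is not certain from the hunks alone:

```rust
use serde::Deserialize;

// Field names mirror the Crawler.toml keys shown above; treat the exact
// set of fields kept after this commit as an assumption.
#[derive(Deserialize)]
struct Config {
    surreal_url: String,
    surreal_username: String,
    surreal_password: String,
    surreal_ns: String,
    surreal_db: String,
    crawl_filter: String,
    start_url: String,
    budget: usize,
}

fn main() {
    let buf = std::fs::read_to_string("Crawler.toml").expect("Failed to read Crawler.toml");
    let config: Config = toml::from_str(&buf).expect("Failed to parse Crawler.toml");
    println!("Crawling {} with budget {}", config.start_url, config.budget);
}
```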
README.md (34 lines changed)
@@ -2,43 +2,13 @@

Crawls sites saving all the found links to a surrealdb database. It then proceeds to take batches of 100 uncrawled links untill the crawl budget is reached. It saves the data of each site in a minio database.

## How to use

1. Clone the repo and `cd` into it.
2. Build the repo with `cargo build -r`
3. Start the docker conatiners
    1. cd into the docker folder `cd docker`
    2. Bring up the docker containers `docker compose up -d`
4. From the project's root, edit the `Crawler.toml` file to your liking.
5. Run with `./target/release/internet_mapper`

You can view stats of the project at `http://<your-ip>:3000/dashboards`

```bash
# Untested script but probably works
git clone https://git.oliveratkinson.net/Oliver/internet_mapper.git
cd internet_mapper

cargo build -r

cd docker
docker compose up -d
cd ..

$EDITOR Crawler.toml

./target/release/internet_mapper

```

### TODO

- [x] Domain filtering - prevent the crawler from going on alternate versions of wikipedia.
- [ ] Domain filtering - prevent the crawler from going on alternate versions of wikipedia.
- [ ] Conditionally save content - based on filename or file contents
- [x] GUI / TUI ? - Graphana
- [x] Better asynchronous getting of the sites. Currently it all happens serially.
- [x] Allow for storing asynchronously - dropping the "links to" logic fixes this need
- [x] Control crawler via config file (no recompliation needed)
- [ ] Allow for storing asynchronously

3/17/25: Took >1hr to crawl 100 pages
@@ -66,3 +66,4 @@ volumes:
  grafana_storage:
  alloy_storage:
  surrealdb_storage:
  minio_storage:
src/db.rs (46 lines changed)
@@ -1,12 +1,7 @@
use metrics::counter;
use rusqlite::Connection;
use std::fmt::Debug;
use serde::{Deserialize, Serialize};
use surrealdb::{
    engine::remote::ws::{Client, Ws},
    opt::auth::Root,
    sql::Thing,
    Surreal,
};
use tracing::{error, instrument, trace};
use url::Url;
@@ -49,11 +44,16 @@ impl Website {
// Insert ever item in the vec into surreal, crawled state will be preserved as TRUE
// if already in the database as such or incoming data is TRUE.
#[instrument(skip(db))]
pub async fn store_all(all: Vec<Self>, db: &Surreal<Client>) -> Vec<Thing> {
pub async fn store_all(all: Vec<Self>, db: &Connection) {
    counter!(STORE).increment(1);
    let mut things = Vec::with_capacity(all.len());

    rusqlite::ParamsFromIter;

    db.execute("",
        params![]
    );

    match db
        .query(
            "INSERT INTO website $array
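The new `store_all` signature targets rusqlite, but the SQL in this hunk is still a placeholder (`db.execute("", params![])`). A minimal sketch of what the batched insert could look like, assuming a `website` table with `site` and `crawled` columns and a unique key on `site` (none of those names come from the commit); the upsert mirrors the doc comment's rule that an existing `crawled = TRUE` state is preserved:

```rust
use rusqlite::{params, Connection};

// Hypothetical row type; the real Website struct lives elsewhere in src/db.rs.
struct Website {
    site: String,
    crawled: bool,
}

// Sketch only: table name, column names, and the UNIQUE(site) constraint are assumptions.
fn store_all(all: Vec<Website>, db: &Connection) -> rusqlite::Result<()> {
    let mut stmt = db.prepare(
        "INSERT INTO website (site, crawled) VALUES (?1, ?2)
         ON CONFLICT(site) DO UPDATE SET crawled = crawled OR excluded.crawled",
    )?;
    for w in all {
        // An existing TRUE value survives because of the OR in the upsert above.
        stmt.execute(params![w.site, w.crawled])?;
    }
    Ok(())
}
```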
@@ -90,32 +90,10 @@ pub struct Record {
pub id: Thing,
}

#[instrument(skip_all, name = "SurrealDB")]
pub async fn connect(config: &Config) -> surrealdb::Result<Surreal<Client>> {
    trace!("Establishing connection to surreal...");
#[instrument(skip_all, name = "sqlite_connect")]
pub async fn connect(config: &Config) -> Result<Connection, rusqlite::Error> {
    trace!("Establishing connection to sqlite...");
    // Connect to the server
    let db = Surreal::new::<Ws>(&config.surreal_url).await?;

    trace!("Logging in...");
    // Signin as a namespace, database, or root user
    db.signin(Root {
        username: &config.surreal_username,
        password: &config.surreal_password,
    })
    .await?;

    // Select a specific namespace / database
    db.use_ns(&config.surreal_ns)
        .use_db(&config.surreal_db)
        .await?;

    let setup = include_bytes!("setup.surql");
    let file = setup.iter().map(|c| *c as char).collect::<String>();

    db.query(file)
        .await
        .expect("Failed to setup surreal tables.");

    Ok(db)
    Connection::open("./squeelite.db")
}
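On the sqlite side, `connect` now just opens `./squeelite.db`, while the removed surreal path also ran a `setup.surql` schema script. If equivalent setup is wanted for sqlite, a sketch could look like this; the `CREATE TABLE` statement is an assumption, not part of the commit:

```rust
use rusqlite::Connection;

// Sketch: open the database file named in the hunk above and ensure a schema
// exists. The table definition here is hypothetical.
fn connect() -> Result<Connection, rusqlite::Error> {
    let db = Connection::open("./squeelite.db")?;
    db.execute_batch(
        "CREATE TABLE IF NOT EXISTS website (
            site TEXT PRIMARY KEY,
            crawled INTEGER NOT NULL DEFAULT 0
        );",
    )?;
    Ok(db)
}
```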
@@ -5,10 +5,7 @@ use tracing::{debug, error, instrument, trace, warn};
use url::Url;

#[instrument(skip(data))]
/// Returns whether or not the saved file should be parsed.
/// If the file is just data, like an image, it doesn't need to be parsed.
/// If it's html, then it does need to be parsed.
pub async fn store(data: &str, url: &Url) -> bool {
pub async fn store(data: &str, url: &Url) {
    // extract data from url to save it accurately
    let url_path = PathBuf::from("./downloaded/".to_string() + url.domain().unwrap_or("UnknownDomain") + url.path());
@@ -24,20 +21,19 @@ pub async fn store(data: &str, url: &Url) -> bool {
        (url_path.clone(), "index.html".into())
    };

    let should_parse = filename.ends_with(".html");

    debug!("Writing at: {:?} {:?}", basepath, filename);

    // create the folders
    if let Err(err) = fs::create_dir_all(&basepath).await {
        error!("Dir creation: {err} {:?}", basepath);
    } else {
        // FIXME I don't think this handles index.html files well...
        // TODO this should probably append .html to non-described files
        // create the file if that was successful
        if let Err(err) = fs::write(&basepath.join(filename), data).await {
            error!("File creation: {err} {:?}", url_path);
        }
    }

    should_parse
}

fn valid_file_extension(take: &&OsStr) -> bool {
@@ -45,14 +41,28 @@ fn valid_file_extension(take: &&OsStr) -> bool {
    let all = los.split('.');
    match all.last() {
        Some(s) => {
            // FIXME it's worth noting that the dumb tlds like .zip are in here,
            // which could cause problems
            let all_domains = include_str!("tlds-alpha-by-domain.txt");
            match s.to_lowercase().as_str() {
                "html" => true,
                "css" => true,
                "js" => true,
                "ts" => true,
                "otf" => true, // font

                // check if it is a domain
                match all_domains.lines().map(str::to_lowercase).find(|x| x==s.to_lowercase().as_str()) {
                    Some(_) => false,
                    None => true
                "png" => true,
                "svg" => true,
                "jpg" => true,
                "jpeg" => true,
                "mp4" => true,
                "mp3" => true,
                "webp" => true,

                "pdf" => true,
                "json" => true,
                "xml" => true,
                _ => {
                    warn!("Might be forgetting a file extension: {s}");
                    false
                }
            }
        },
        None => false,
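This hunk interleaves two versions of `valid_file_extension`: a TLD lookup against `tlds-alpha-by-domain.txt` and an explicit extension whitelist. Reading the hunk counts (`-45,14 +41,28`), the whitelist appears to be the incoming version; a minimal untangled sketch under that assumption (the fallback `warn!` branch is dropped here for brevity):

```rust
use std::ffi::OsStr;

// One reading of the interleaved hunk above: keep files whose final
// dot-segment is on a whitelist of content extensions. Which arms survive
// this commit is an assumption.
fn valid_file_extension(take: &&OsStr) -> bool {
    let los = take.to_string_lossy();
    match los.split('.').last() {
        Some(s) => matches!(
            s.to_lowercase().as_str(),
            "html" | "css" | "js" | "ts" | "otf" | "png" | "svg" | "jpg" | "jpeg"
                | "mp4" | "mp3" | "webp" | "pdf" | "json" | "xml"
        ),
        None => false,
    }
}
```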
src/main.rs (55 lines changed)
@@ -24,8 +24,6 @@ const GET_IN_FLIGHT: &str = "gets_in_flight";
const SITES_CRAWLED: &str = "pages_crawled";
const BEING_PROCESSED: &str = "pages_being_processed";

const BATCH_SIZE: usize = 2;

#[derive(Deserialize)]
struct Config {
    surreal_ns: String,
@@ -35,14 +33,11 @@ struct Config {
    surreal_password: String,

    crawl_filter: String,
    start_url: String,
    budget: usize,
}

#[tokio::main]
async fn main() {
    println!("Logs and metrics are provided to the Grafana dashboard");

    let writer = std::fs::OpenOptions::new()
        .append(true)
        .create(true)
@@ -75,7 +70,8 @@ async fn main() {
        .expect("failed to install recorder/exporter");

    info!("Starting...");

    // Would probably take these in as parameters from a cli
    let starting_url = "https://en.wikipedia.org/";
    // When getting uncrawled pages, name must contain this variable. "" will effectively get ignored.
    // let crawl_filter = "en.wikipedia.org/";
    // let budget = 50;
@@ -86,7 +82,6 @@ async fn main() {
    let _ = file.read_to_string(&mut buf);

    let config: Config = toml::from_str(&buf).expect("Failed to parse Crawler.toml");
    let starting_url = &config.start_url;

    let db = connect(&config)
        .await
@@ -111,7 +106,13 @@ async fn main() {
    let span = trace_span!("Loop");
    let span = span.enter();
    while crawled < config.budget {
        let uncrawled = get_uncrawled_links(&db, config.budget - crawled, config.crawl_filter.clone()).await;
        let get_num = if config.budget - crawled < 100 {
            config.budget - crawled
        } else {
            100
        };

        let uncrawled = get_uncrawled_links(&db, get_num, config.crawl_filter.clone()).await;
        if uncrawled.is_empty() {
            info!("Had more budget but finished crawling everything.");
            return;
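The rewritten loop body caps each fetch at 100 links instead of relying on the old `BATCH_SIZE` constant; the if/else is equivalent to taking a minimum, e.g.:

```rust
// Equivalent to the conditional above: the next batch is the smaller of
// the remaining budget and 100.
fn next_batch(budget: usize, crawled: usize) -> usize {
    (budget - crawled).min(100)
}
```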
@@ -137,15 +138,6 @@ async fn main() {
    }
    drop(span);

    if let Ok(mut ok) = db.query("count(select id from website where crawled = true)").await {
        let res = ok.take::<Option<usize>>(0);
        if let Ok(i) = res {
            if let Some(n) = i {
                info!("Total crawled pages now equals {n}");
            }
        }
    }

    info!("Done");
}
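This hunk removes the SurrealDB `count(select id from website where crawled = true)` report. If the same statistic is wanted on sqlite, `query_row` is the usual shape; the table and column names here are assumptions carried over from the sketches above:

```rust
use rusqlite::Connection;

// Hypothetical sqlite replacement for the removed SurrealDB count query.
fn crawled_count(db: &Connection) -> rusqlite::Result<i64> {
    db.query_row(
        "SELECT COUNT(*) FROM website WHERE crawled = 1",
        [],
        |row| row.get(0),
    )
}
```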
@@ -166,26 +158,26 @@ async fn process(mut site: Website, db: Surreal<Client>, reqwest: reqwest::Clien
    // Send the http request (get)
    if let Ok(response) = request_builder.send().await {

        // TODO if this will fail if the object we are downloading is
        // larger than the memory of the device it's running on.
        // We should store it *as* we download it then parse it in-place.
        // METRICS
        g.decrement(1);
        counter!(GET_METRIC).increment(1);

        // Get body from response
        let data = response
            .text()
            .await
            .expect("Failed to read http response's body!");

        // METRICS
        g.decrement(1);
        counter!(GET_METRIC).increment(1);

        // Store document
        let should_parse = filesystem::store(&data, &site.site).await;
        filesystem::store(&data, &site.site).await;

        if should_parse {
            // Parse document and get relationships
            let sites = parser::parse(&site, &data).await;

            // update self in db
            site.set_crawled();
            Website::store_all(vec![site], &db).await;

            // De-duplicate this list
            let prev_len = sites.len();
            let set = sites.into_iter().fold(HashSet::new(), |mut set,item| {
@@ -198,11 +190,6 @@ async fn process(mut site: Website, db: Surreal<Client>, reqwest: reqwest::Clien
            // Store all the other sites so that we can link to them.
            let _ = Website::store_all(de_dupe_sites, &db).await;
        }

        // update self in db
        site.set_crawled();
        Website::store_all(vec![site], &db).await;

    } else {
        error!("Failed to get: {}", &site.site);
@@ -216,11 +203,9 @@ async fn get_uncrawled_links(
    mut count: usize,
    filter: String,
) -> Vec<Website> {

    if count > BATCH_SIZE {
        count = BATCH_SIZE;
    if count > 100 {
        count = 100
    }

    debug!("Getting uncrawled links");

    let mut response = db
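For completeness, a sqlite version of `get_uncrawled_links` might look like the sketch below: the same 100-link cap as the hunk above, with `crawl_filter` applied as a `LIKE` pattern. The table, columns, and filter semantics are assumptions; the commit itself still shows the surreal query being built.

```rust
use rusqlite::{params, Connection};

// Hypothetical sqlite query for uncrawled links; the schema and the
// LIKE-based filter are assumptions, not taken from the commit.
fn get_uncrawled_links(db: &Connection, mut count: usize, filter: String) -> rusqlite::Result<Vec<String>> {
    if count > 100 {
        count = 100;
    }
    let mut stmt = db.prepare(
        "SELECT site FROM website WHERE crawled = 0 AND site LIKE ?1 LIMIT ?2",
    )?;
    let rows = stmt.query_map(params![format!("%{filter}%"), count as i64], |row| row.get(0))?;
    rows.collect()
}
```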
File diff suppressed because it is too large