Compare commits
7 Commits: foss_stora...4b557a923c

| SHA1 |
|---|
| 4b557a923c |
| c08a20ac00 |
| 94912e9125 |
| a9465dda6e |
| add6f00ed6 |
| 4a433a1a77 |
| 03cbcd9ae0 |
Cargo.lock (generated, 2287 lines changed): diff suppressed because it is too large.
Cargo.toml

```diff
@@ -11,8 +11,8 @@ metrics-exporter-prometheus = { version = "0.16.2", features=["http-listener"]}
 # minio = "0.1.0"
 minio = {git="https://github.com/minio/minio-rs.git", rev = "c28f576"}
 reqwest = { version = "0.12", features = ["gzip", "default", "rustls-tls"] }
-rusqlite = { version = "0.34.0", features = ["bundled"] }
 serde = { version = "1.0", features = ["derive"] }
+surrealdb = "2.2"
 tokio = { version="1.41.0", features = ["full"] }
 toml = "0.8.20"
 tracing = "0.1"
```
Crawler.toml

```diff
@@ -3,8 +3,9 @@ surreal_url = "localhost:8000"
 surreal_username = "root"
 surreal_password = "root"
 surreal_ns = "test"
-surreal_db = "v1.19.2"
+surreal_db = "v1.19.5"
 
 # Crawler config
-crawl_filter = "en.wikipedia.com"
-budget = 1000
+crawl_filter = "en.wikipedia.org"
+start_url = "https://en.wikipedia.org"
+budget = 100
```
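The crawl now starts from a configurable `start_url` rather than a constant hard-coded in `src/main.rs`, the filter is corrected to `en.wikipedia.org`, and the sample budget drops from 1000 to 100. A minimal sketch of how these keys deserialize, assuming the `Config` field set visible in the src/main.rs and src/db.rs hunks below (not the project's verbatim code):

```rust
// Sketch only: field names mirror the Config struct used in the diff.
use serde::Deserialize;

#[derive(Deserialize, Debug)]
struct Config {
    surreal_url: String,
    surreal_username: String,
    surreal_password: String,
    surreal_ns: String,
    surreal_db: String,
    crawl_filter: String,
    start_url: String, // new key in this change set
    budget: usize,
}

fn main() {
    let buf = std::fs::read_to_string("Crawler.toml").expect("Failed to read Crawler.toml");
    let config: Config = toml::from_str(&buf).expect("Failed to parse Crawler.toml");
    println!("Crawling {} with a budget of {}", config.start_url, config.budget);
}
```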
README.md (34 lines changed)

````diff
@@ -2,13 +2,43 @@
 
 Crawls sites saving all the found links to a surrealdb database. It then proceeds to take batches of 100 uncrawled links untill the crawl budget is reached. It saves the data of each site in a minio database.
 
+## How to use
+
+1. Clone the repo and `cd` into it.
+2. Build the repo with `cargo build -r`
+3. Start the docker conatiners
+    1. cd into the docker folder `cd docker`
+    2. Bring up the docker containers `docker compose up -d`
+4. From the project's root, edit the `Crawler.toml` file to your liking.
+5. Run with `./target/release/internet_mapper`
+
+You can view stats of the project at `http://<your-ip>:3000/dashboards`
+
+```bash
+# Untested script but probably works
+git clone https://git.oliveratkinson.net/Oliver/internet_mapper.git
+cd internet_mapper
+
+cargo build -r
+
+cd docker
+docker compose up -d
+cd ..
+
+$EDITOR Crawler.toml
+
+./target/release/internet_mapper
+
+```
+
 ### TODO
 
-- [ ] Domain filtering - prevent the crawler from going on alternate versions of wikipedia.
+- [x] Domain filtering - prevent the crawler from going on alternate versions of wikipedia.
 - [ ] Conditionally save content - based on filename or file contents
 - [x] GUI / TUI ? - Graphana
 - [x] Better asynchronous getting of the sites. Currently it all happens serially.
-- [ ] Allow for storing asynchronously
+- [x] Allow for storing asynchronously - dropping the "links to" logic fixes this need
+- [x] Control crawler via config file (no recompliation needed)
 
 3/17/25: Took >1hr to crawl 100 pages
 
````
docker compose file

```diff
@@ -66,4 +66,3 @@ volumes:
   grafana_storage:
   alloy_storage:
   surrealdb_storage:
-  minio_storage:
```
src/db.rs (46 lines changed)
```diff
@@ -1,7 +1,12 @@
 use metrics::counter;
-use rusqlite::Connection;
 use std::fmt::Debug;
 use serde::{Deserialize, Serialize};
+use surrealdb::{
+    engine::remote::ws::{Client, Ws},
+    opt::auth::Root,
+    sql::Thing,
+    Surreal,
+};
 use tracing::{error, instrument, trace};
 use url::Url;
 
```
```diff
@@ -44,16 +49,11 @@ impl Website {
 
     // Insert ever item in the vec into surreal, crawled state will be preserved as TRUE
     // if already in the database as such or incoming data is TRUE.
-    pub async fn store_all(all: Vec<Self>, db: &Connection) {
+    #[instrument(skip(db))]
+    pub async fn store_all(all: Vec<Self>, db: &Surreal<Client>) -> Vec<Thing> {
         counter!(STORE).increment(1);
         let mut things = Vec::with_capacity(all.len());
 
-        rusqlite::ParamsFromIter;
-
-        db.execute("",
-            params![]
-        );
-
         match db
             .query(
                 "INSERT INTO website $array
```
```diff
@@ -90,10 +90,32 @@ pub struct Record {
     pub id: Thing,
 }
 
-#[instrument(skip_all, name = "sqlite_connect")]
-pub async fn connect(config: &Config) -> Result<Connection, rusqlite::Error> {
-    trace!("Establishing connection to sqlite...");
+#[instrument(skip_all, name = "SurrealDB")]
+pub async fn connect(config: &Config) -> surrealdb::Result<Surreal<Client>> {
+    trace!("Establishing connection to surreal...");
     // Connect to the server
-    Connection::open("./squeelite.db")
+    let db = Surreal::new::<Ws>(&config.surreal_url).await?;
+
+    trace!("Logging in...");
+    // Signin as a namespace, database, or root user
+    db.signin(Root {
+        username: &config.surreal_username,
+        password: &config.surreal_password,
+    })
+    .await?;
+
+    // Select a specific namespace / database
+    db.use_ns(&config.surreal_ns)
+        .use_db(&config.surreal_db)
+        .await?;
+
+    let setup = include_bytes!("setup.surql");
+    let file = setup.iter().map(|c| *c as char).collect::<String>();
+
+    db.query(file)
+        .await
+        .expect("Failed to setup surreal tables.");
+
+    Ok(db)
 }
 
```
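The sqlite-backed `connect`/`store_all` pair is replaced wholesale with SurrealDB over WebSocket. A hedged usage sketch of the new surface (the type and function names come from the hunks above; the wrapper function itself is hypothetical):

```rust
use surrealdb::{engine::remote::ws::Client, sql::Thing, Surreal};

// Hypothetical helper, not part of the diff: connect() opens the WebSocket
// session, signs in as root, selects the namespace/database, and runs
// setup.surql; store_all() then bulk-inserts Website rows and returns the
// Things (record ids) SurrealDB assigned.
async fn store_batch(config: &Config, batch: Vec<Website>) -> surrealdb::Result<Vec<Thing>> {
    let db: Surreal<Client> = connect(config).await?;
    let ids = Website::store_all(batch, &db).await;
    Ok(ids)
}
```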
src/filesystem.rs

```diff
@@ -5,7 +5,10 @@ use tracing::{debug, error, instrument, trace, warn};
 use url::Url;
 
 #[instrument(skip(data))]
-pub async fn store(data: &str, url: &Url) {
+/// Returns whether or not the saved file should be parsed.
+/// If the file is just data, like an image, it doesn't need to be parsed.
+/// If it's html, then it does need to be parsed.
+pub async fn store(data: &str, url: &Url) -> bool {
     // extract data from url to save it accurately
     let url_path = PathBuf::from("./downloaded/".to_string() + url.domain().unwrap_or("UnknownDomain") + url.path());
 
```
```diff
@@ -21,19 +24,20 @@ pub async fn store(data: &str, url: &Url) {
         (url_path.clone(), "index.html".into())
     };
 
+    let should_parse = filename.ends_with(".html");
+
     debug!("Writing at: {:?} {:?}", basepath, filename);
 
     // create the folders
     if let Err(err) = fs::create_dir_all(&basepath).await {
         error!("Dir creation: {err} {:?}", basepath);
     } else {
-        // FIXME I don't think this handles index.html files well...
-        // TODO this should probably append .html to non-described files
-        // create the file if that was successful
         if let Err(err) = fs::write(&basepath.join(filename), data).await {
             error!("File creation: {err} {:?}", url_path);
         }
     }
+
+    should_parse
 }
 
 fn valid_file_extension(take: &&OsStr) -> bool {
```
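`store` now reports back whether the saved file is worth handing to the parser, keyed purely off the final filename. A tiny illustrative sketch of that contract (the helper name is hypothetical):

```rust
// Minimal sketch of the new return-value contract for filesystem::store
// (illustrative; the real function also creates directories and writes data).
fn should_parse_saved_file(filename: &str) -> bool {
    // Only HTML is handed to the parser; images, PDFs, etc. are stored as-is.
    filename.ends_with(".html")
}

fn main() {
    assert!(should_parse_saved_file("index.html"));
    assert!(!should_parse_saved_file("logo.png"));
}
```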
```diff
@@ -41,28 +45,14 @@ fn valid_file_extension(take: &&OsStr) -> bool {
     let all = los.split('.');
     match all.last() {
         Some(s) => {
-            match s.to_lowercase().as_str() {
-                "html" => true,
-                "css" => true,
-                "js" => true,
-                "ts" => true,
-                "otf" => true, // font
-
-                "png" => true,
-                "svg" => true,
-                "jpg" => true,
-                "jpeg" => true,
-                "mp4" => true,
-                "mp3" => true,
-                "webp" => true,
-
-                "pdf" => true,
-                "json" => true,
-                "xml" => true,
-                _ => {
-                    warn!("Might be forgetting a file extension: {s}");
-                    false
-                }
+            // FIXME it's worth noting that the dumb tlds like .zip are in here,
+            // which could cause problems
+            let all_domains = include_str!("tlds-alpha-by-domain.txt");
+
+            // check if it is a domain
+            match all_domains.lines().map(str::to_lowercase).find(|x| x==s.to_lowercase().as_str()) {
+                Some(_) => false,
+                None => true
             }
         },
         None => false,
```
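The hard-coded extension whitelist is gone: the last dot-separated segment is now looked up in the bundled IANA TLD list, so bare domains (which end in a TLD) are rejected while genuine file extensions pass. A standalone sketch of the same lookup, with a tiny inline list standing in for src/tlds-alpha-by-domain.txt:

```rust
// Sketch of the TLD-based check, assuming a newline-separated, upper-case TLD
// list like the bundled src/tlds-alpha-by-domain.txt (inlined here so the
// example runs on its own).
fn ends_in_tld(last_segment: &str, tld_list: &str) -> bool {
    let seg = last_segment.to_lowercase();
    tld_list.lines().map(str::to_lowercase).any(|tld| tld == seg)
}

fn main() {
    let tlds = "COM\nORG\nZIP"; // stand-in for include_str!("tlds-alpha-by-domain.txt")
    // "en.wikipedia.org" ends in a TLD, so it is a domain, not a file.
    assert!(ends_in_tld("org", tlds));
    // "style.css" does not end in a TLD, so valid_file_extension returns true.
    assert!(!ends_in_tld("css", tlds));
    // Caveat noted in the diff: real TLDs like .zip collide with file extensions.
    assert!(ends_in_tld("zip", tlds));
}
```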
src/main.rs (57 lines changed)
```diff
@@ -24,6 +24,8 @@ const GET_IN_FLIGHT: &str = "gets_in_flight";
 const SITES_CRAWLED: &str = "pages_crawled";
 const BEING_PROCESSED: &str = "pages_being_processed";
 
+const BATCH_SIZE: usize = 2;
+
 #[derive(Deserialize)]
 struct Config {
     surreal_ns: String,
```
```diff
@@ -33,11 +35,14 @@ struct Config {
     surreal_password: String,
 
     crawl_filter: String,
+    start_url: String,
     budget: usize,
 }
 
 #[tokio::main]
 async fn main() {
+    println!("Logs and metrics are provided to the Grafana dashboard");
+
     let writer = std::fs::OpenOptions::new()
         .append(true)
         .create(true)
```
```diff
@@ -70,8 +75,7 @@ async fn main() {
         .expect("failed to install recorder/exporter");
 
     info!("Starting...");
-    // Would probably take these in as parameters from a cli
-    let starting_url = "https://en.wikipedia.org/";
+
     // When getting uncrawled pages, name must contain this variable. "" will effectively get ignored.
     // let crawl_filter = "en.wikipedia.org/";
     // let budget = 50;
```
```diff
@@ -82,6 +86,7 @@ async fn main() {
     let _ = file.read_to_string(&mut buf);
 
     let config: Config = toml::from_str(&buf).expect("Failed to parse Crawler.toml");
+    let starting_url = &config.start_url;
 
     let db = connect(&config)
         .await
```
```diff
@@ -106,13 +111,7 @@ async fn main() {
     let span = trace_span!("Loop");
     let span = span.enter();
     while crawled < config.budget {
-        let get_num = if config.budget - crawled < 100 {
-            config.budget - crawled
-        } else {
-            100
-        };
-
-        let uncrawled = get_uncrawled_links(&db, get_num, config.crawl_filter.clone()).await;
+        let uncrawled = get_uncrawled_links(&db, config.budget - crawled, config.crawl_filter.clone()).await;
         if uncrawled.is_empty() {
             info!("Had more budget but finished crawling everything.");
             return;
```
```diff
@@ -138,6 +137,15 @@ async fn main() {
     }
     drop(span);
 
+    if let Ok(mut ok) = db.query("count(select id from website where crawled = true)").await {
+        let res = ok.take::<Option<usize>>(0);
+        if let Ok(i) = res {
+            if let Some(n) = i {
+                info!("Total crawled pages now equals {n}");
+            }
+        }
+    }
+
     info!("Done");
 }
 
```
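After the crawl loop, the new code asks SurrealDB how many pages ended up marked as crawled. A hedged sketch of that query pattern pulled out into a helper (the helper itself is not in the diff):

```rust
use surrealdb::{engine::remote::ws::Client, Surreal};

// query() can run a bare SurrealQL expression; take(0) extracts the first
// statement's result from the response, here as an Option<usize>.
async fn crawled_count(db: &Surreal<Client>) -> Option<usize> {
    let mut response = db
        .query("count(select id from website where crawled = true)")
        .await
        .ok()?;
    response.take::<Option<usize>>(0).ok()?
}
```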
```diff
@@ -158,26 +166,26 @@ async fn process(mut site: Website, db: Surreal<Client>, reqwest: reqwest::Clien
     // Send the http request (get)
     if let Ok(response) = request_builder.send().await {
 
-        // METRICS
-        g.decrement(1);
-        counter!(GET_METRIC).increment(1);
+        // TODO if this will fail if the object we are downloading is
+        // larger than the memory of the device it's running on.
+        // We should store it *as* we download it then parse it in-place.
 
         // Get body from response
         let data = response
             .text()
             .await
             .expect("Failed to read http response's body!");
 
-        // Store document
-        filesystem::store(&data, &site.site).await;
+        // METRICS
+        g.decrement(1);
+        counter!(GET_METRIC).increment(1);
 
+        // Store document
+        let should_parse = filesystem::store(&data, &site.site).await;
+
+        if should_parse {
         // Parse document and get relationships
         let sites = parser::parse(&site, &data).await;
-
-        // update self in db
-        site.set_crawled();
-        Website::store_all(vec![site], &db).await;
-
         // De-duplicate this list
         let prev_len = sites.len();
         let set = sites.into_iter().fold(HashSet::new(), |mut set,item| {
```
```diff
@@ -190,6 +198,11 @@ async fn process(mut site: Website, db: Surreal<Client>, reqwest: reqwest::Clien
 
         // Store all the other sites so that we can link to them.
         let _ = Website::store_all(de_dupe_sites, &db).await;
+        }
+
+        // update self in db
+        site.set_crawled();
+        Website::store_all(vec![site], &db).await;
 
     } else {
         error!("Failed to get: {}", &site.site);
```
```diff
@@ -203,9 +216,11 @@ async fn get_uncrawled_links(
     mut count: usize,
     filter: String,
 ) -> Vec<Website> {
-    if count > 100 {
-        count = 100
+    if count > BATCH_SIZE {
+        count = BATCH_SIZE;
     }
 
     debug!("Getting uncrawled links");
 
     let mut response = db
```
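With `BATCH_SIZE` set to 2, `get_uncrawled_links` now clamps every request to at most two links, while the loop simply passes in whatever budget remains. A standalone sketch of that clamping arithmetic (illustrative only, not the project's code):

```rust
// BATCH_SIZE and the budget arithmetic mirror the diff; the rest is a toy model.
const BATCH_SIZE: usize = 2;

fn clamp_batch(remaining_budget: usize) -> usize {
    // get_uncrawled_links() receives `budget - crawled` and clamps it to BATCH_SIZE.
    remaining_budget.min(BATCH_SIZE)
}

fn main() {
    let budget = 100;
    let (mut crawled, mut passes) = (0, 0);
    while crawled < budget {
        let batch = clamp_batch(budget - crawled);
        crawled += batch; // assume every link in the batch gets crawled
        passes += 1;
    }
    // With BATCH_SIZE = 2 and budget = 100 this takes 50 passes.
    println!("crawled {crawled} pages in {passes} passes");
}
```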
src/tlds-alpha-by-domain.txt (new file, 1444 lines): diff suppressed because it is too large.