Compare commits
11 Commits
foss_stora...0fd76b1734
| Author | SHA1 | Date |
|---|---|---|
|  | 0fd76b1734 |  |
|  | 9bfa8f9108 |  |
|  | bdb1094a30 |  |
|  | 9aa2d9ce22 |  |
|  | 4b557a923c |  |
|  | c08a20ac00 |  |
|  | 94912e9125 |  |
|  | a9465dda6e |  |
|  | add6f00ed6 |  |
|  | 4a433a1a77 |  |
|  | 03cbcd9ae0 |  |
19 changes: .vscode/launch.json (vendored)

```diff
@@ -7,18 +7,15 @@
         {
             "type": "lldb",
             "request": "launch",
-            "name": "Debug executable 'surreal_spider'",
-            "env": {
-                "RUST_LOG": "surreal_spider=trace,reqwest=info",
-            },
+            "name": "Debug executable 'internet_mapper'",
             "cargo": {
                 "args": [
                     "build",
-                    "--bin=surreal_spider",
-                    "--package=surreal_spider"
+                    "--bin=internet_mapper",
+                    "--package=internet_mapper"
                 ],
                 "filter": {
-                    "name": "surreal_spider",
+                    "name": "internet_mapper",
                     "kind": "bin"
                 }
             },
@@ -28,16 +25,16 @@
         {
             "type": "lldb",
             "request": "launch",
-            "name": "Debug unit tests in executable 'surreal_spider'",
+            "name": "Debug unit tests in executable 'internet_mapper'",
             "cargo": {
                 "args": [
                     "test",
                     "--no-run",
-                    "--bin=surreal_spider",
-                    "--package=surreal_spider"
+                    "--bin=internet_mapper",
+                    "--package=internet_mapper"
                 ],
                 "filter": {
-                    "name": "surreal_spider",
+                    "name": "internet_mapper",
                     "kind": "bin"
                 }
             },
```
2 changes: .vscode/settings.json (vendored)

```diff
@@ -3,6 +3,6 @@
         "creds",
         "reqwest",
         "rustls",
-        "surql"
+        "surql",
     ]
 }
```
1 change: Cargo.lock (generated)

```diff
@@ -1966,6 +1966,7 @@ name = "internet_mapper"
 version = "0.1.0"
 dependencies = [
  "base64 0.22.1",
+ "futures-util",
  "html5ever 0.29.1",
  "metrics",
  "metrics-exporter-prometheus",
```
@@ -5,12 +5,13 @@ edition = "2021"
|
|||||||
|
|
||||||
[dependencies]
|
[dependencies]
|
||||||
base64 = "0.22.1"
|
base64 = "0.22.1"
|
||||||
|
futures-util = "0.3.31"
|
||||||
html5ever = "0.29"
|
html5ever = "0.29"
|
||||||
metrics = "0.24.1"
|
metrics = "0.24.1"
|
||||||
metrics-exporter-prometheus = { version = "0.16.2", features=["http-listener"]}
|
metrics-exporter-prometheus = { version = "0.16.2", features=["http-listener"]}
|
||||||
# minio = "0.1.0"
|
# minio = "0.1.0"
|
||||||
minio = {git="https://github.com/minio/minio-rs.git", rev = "c28f576"}
|
minio = {git="https://github.com/minio/minio-rs.git", rev = "c28f576"}
|
||||||
reqwest = { version = "0.12", features = ["gzip", "default", "rustls-tls"] }
|
reqwest = { version = "0.12", features = ["gzip", "default", "rustls-tls", "stream"] }
|
||||||
serde = { version = "1.0", features = ["derive"] }
|
serde = { version = "1.0", features = ["derive"] }
|
||||||
surrealdb = "2.2"
|
surrealdb = "2.2"
|
||||||
tokio = { version="1.41.0", features = ["full"] }
|
tokio = { version="1.41.0", features = ["full"] }
|
||||||
|
|||||||
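The two dependency changes work together: the `stream` feature enables `reqwest::Response::bytes_stream()`, and `futures-util` supplies the `StreamExt` trait that makes the resulting stream iterable. A minimal sketch of the pattern these additions unlock (placeholder URL, error handling reduced to `?`):

```rust
use futures_util::StreamExt; // provides .next() on the byte stream

#[tokio::main]
async fn main() -> Result<(), reqwest::Error> {
    let response = reqwest::get("https://example.com/").await?;
    // With the "stream" feature, the body arrives chunk by chunk
    // instead of being buffered whole by .text() or .bytes().
    let mut stream = response.bytes_stream();
    while let Some(chunk) = stream.next().await {
        println!("received {} bytes", chunk?.len());
    }
    Ok(())
}
```

This is what lets `process()` in the src/main.rs diff below write large responses to disk without holding the whole body in memory.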
10 changes: Crawler.toml

```diff
@@ -3,8 +3,12 @@ surreal_url = "localhost:8000"
 surreal_username = "root"
 surreal_password = "root"
 surreal_ns = "test"
-surreal_db = "v1.19.2"
+surreal_db = "v1.20.3"
 
 # Crawler config
-crawl_filter = "en.wikipedia.com"
-budget = 1000
+crawl_filter = "https://ftpgeoinfo.msl.mt.gov/Data/Spatial/MSDI/Imagery/2023_NAIP/UTM_County_Mosaics/"
+# crawl_filter = "https://oliveratkinson.net"
+start_url = "https://ftpgeoinfo.msl.mt.gov/Data/Spatial/MSDI/Imagery/2023_NAIP/UTM_County_Mosaics/"
+# start_url = "https://oliveratkinson.net"
+budget = 100
+batch_size = 5
```
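These keys map one-to-one onto the `Config` struct that src/main.rs deserializes with `toml::from_str` (see that diff below). A sketch of the round trip, assuming the remaining `surreal_*` fields deserialize the same way as the ones shown in the diffs:

```rust
use serde::Deserialize;

// Field names must match the keys in Crawler.toml exactly.
#[derive(Deserialize)]
struct Config {
    surreal_url: String,
    surreal_username: String,
    surreal_password: String,
    surreal_ns: String,
    surreal_db: String,
    crawl_filter: String,
    start_url: String,
    budget: usize,
    batch_size: usize,
}

fn main() {
    let buf = std::fs::read_to_string("Crawler.toml").expect("Crawler.toml not found");
    let config: Config = toml::from_str(&buf).expect("Failed to parse Crawler.toml");
    // With budget = 100 and batch_size = 5, the crawl loop will request
    // at most 5 uncrawled links per query, up to 100 pages total.
    println!("start: {}, budget: {}", config.start_url, config.budget);
}
```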
34 changes: README.md

```diff
@@ -2,13 +2,43 @@
 
 Crawls sites saving all the found links to a surrealdb database. It then proceeds to take batches of 100 uncrawled links untill the crawl budget is reached. It saves the data of each site in a minio database.
 
+## How to use
+
+1. Clone the repo and `cd` into it.
+2. Build the repo with `cargo build -r`
+3. Start the docker conatiners
+    1. cd into the docker folder `cd docker`
+    2. Bring up the docker containers `docker compose up -d`
+4. From the project's root, edit the `Crawler.toml` file to your liking.
+5. Run with `./target/release/internet_mapper`
+
+You can view stats of the project at `http://<your-ip>:3000/dashboards`
+
+```bash
+# Untested script but probably works
+git clone https://git.oliveratkinson.net/Oliver/internet_mapper.git
+cd internet_mapper
+
+cargo build -r
+
+cd docker
+docker compose up -d
+cd ..
+
+$EDITOR Crawler.toml
+
+./target/release/internet_mapper
+```
+
 ### TODO
 
-- [ ] Domain filtering - prevent the crawler from going on alternate versions of wikipedia.
+- [x] Domain filtering - prevent the crawler from going on alternate versions of wikipedia.
 - [ ] Conditionally save content - based on filename or file contents
 - [x] GUI / TUI ? - Graphana
 - [x] Better asynchronous getting of the sites. Currently it all happens serially.
-- [ ] Allow for storing asynchronously
+- [x] Allow for storing asynchronously - dropping the "links to" logic fixes this need
+- [x] Control crawler via config file (no recompliation needed)
 
 3/17/25: Took >1hr to crawl 100 pages
```
```diff
@@ -66,4 +66,3 @@ volumes:
   grafana_storage:
   alloy_storage:
   surrealdb_storage:
-  minio_storage:
```
src/db.rs

```diff
@@ -49,6 +49,7 @@ impl Website {
 
     // Insert ever item in the vec into surreal, crawled state will be preserved as TRUE
     // if already in the database as such or incoming data is TRUE.
+    #[instrument(skip(db))]
     pub async fn store_all(all: Vec<Self>, db: &Surreal<Client>) -> Vec<Thing> {
         counter!(STORE).increment(1);
         let mut things = Vec::with_capacity(all.len());
```
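The only change here is the `tracing` attribute: it wraps every `store_all` call in a span, recording the arguments as fields, while `skip(db)` keeps the connection handle (which has no useful `Debug` form) out of the span. A self-contained illustration with a hypothetical `save` function, not code from this repo:

```rust
use tracing::instrument;

struct DbHandle; // stand-in for Surreal<Client>

// Skipped arguments are left out of the span; all others must impl Debug.
#[instrument(skip(db))]
async fn save(items: Vec<String>, db: &DbHandle) {
    tracing::trace!("storing {} items", items.len());
}

#[tokio::main]
async fn main() {
    tracing_subscriber::fmt().init();
    // Emits a span like: save{items=["a"]}
    save(vec!["a".to_string()], &DbHandle).await;
}
```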
src/filesystem.rs

```diff
@@ -1,11 +1,10 @@
-use std::{ffi::OsStr, path::PathBuf};
+use std::{ffi::OsStr, io::ErrorKind, path::PathBuf};
 
 use tokio::fs;
-use tracing::{debug, error, instrument, trace, warn};
+use tracing::{error, trace};
 use url::Url;
 
-#[instrument(skip(data))]
-pub async fn store(data: &str, url: &Url) {
+pub fn as_path(url: &Url) -> PathBuf {
     // extract data from url to save it accurately
     let url_path = PathBuf::from("./downloaded/".to_string() + url.domain().unwrap_or("UnknownDomain") + url.path());
 
@@ -21,17 +20,40 @@ pub async fn store(data: &str, url: &Url) {
         (url_path.clone(), "index.html".into())
     };
 
-    debug!("Writing at: {:?} {:?}", basepath, filename);
+    let mut path = PathBuf::new();
+    path = path.join(basepath);
+    path = path.join(filename);
 
-    // create the folders
-    if let Err(err) = fs::create_dir_all(&basepath).await {
-        error!("Dir creation: {err} {:?}", basepath);
-    } else {
-        // FIXME I don't think this handles index.html files well...
-        // TODO this should probably append .html to non-described files
-        // create the file if that was successful
-        if let Err(err) = fs::write(&basepath.join(filename), data).await {
-            error!("File creation: {err} {:?}", url_path);
+    path
+}
+
+pub async fn init(filename: &PathBuf) -> Option<fs::File> {
+    let file = async || tokio::fs::OpenOptions::new()
+        .append(true)
+        .create(true)
+        .open(&filename).await;
+
+    match file().await {
+        Ok(ok) => Some(ok),
+        Err(err) => {
+            // the file/folder isn't found
+            if err.kind() == ErrorKind::NotFound {
+                if let Some(parent ) = &filename.parent() {
+                    // create the folders
+                    if let Err(err) = fs::create_dir_all(&parent).await {
+                        error!("Dir creation: {err} {:?}", filename);
+                        eprintln!("{}", err)
+                    } else if let Ok(ok) = file().await {
+                        return Some(ok);
+                    }
+                } else {
+                    error!("Couldn't get file's parents: {:?}", &filename);
+                }
+            } else {
+                error!("File creation: {err} {:?}", filename);
+            }
+            // we don't care about other errors, we can't/shouldn't fix them
+            None
         }
     }
 }
@@ -41,28 +63,14 @@ fn valid_file_extension(take: &&OsStr) -> bool {
     let all = los.split('.');
     match all.last() {
         Some(s) => {
-            match s.to_lowercase().as_str() {
-                "html" => true,
-                "css" => true,
-                "js" => true,
-                "ts" => true,
-                "otf" => true, // font
-
-                "png" => true,
-                "svg" => true,
-                "jpg" => true,
-                "jpeg" => true,
-                "mp4" => true,
-                "mp3" => true,
-                "webp" => true,
-
-                "pdf" => true,
-                "json" => true,
-                "xml" => true,
-                _ => {
-                    warn!("Might be forgetting a file extension: {s}");
-                    false
-                }
+            // FIXME it's worth noting that the dumb tlds like .zip are in here,
+            // which could cause problems
+            let all_domains = include_str!("tlds-alpha-by-domain.txt");
+
+            // check if it is a domain
+            match all_domains.lines().map(str::to_lowercase).find(|x| x==s.to_lowercase().as_str()) {
+                Some(_) => false,
+                None => true
            }
         },
         None => false,
```
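The old `store()` computed a path and wrote one complete string; it is now split into `as_path` (pure URL-to-path mapping) and `init` (open or create), so the caller can stream chunks into the returned handle. Note that `init` only retries after creating parent directories when the open fails with `ErrorKind::NotFound`, since `append + create` can create a missing file but not missing folders. A condensed sketch of the new call sequence, mirroring how src/main.rs uses it (assumes the `filesystem` module above is in scope):

```rust
use tokio::io::AsyncWriteExt;
use url::Url;

async fn save_chunk(url: &Url, chunk: &[u8]) {
    // 1) Map the URL to ./downloaded/<domain>/<path>
    let path = filesystem::as_path(url);
    // 2) Open in append mode, creating parent folders on NotFound
    if let Some(mut file) = filesystem::init(&path).await {
        // 3) Append each chunk as it arrives off the network
        let _ = file.write_all(chunk).await;
    }
}
```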
125 changes: src/main.rs

```diff
@@ -1,9 +1,17 @@
 #![feature(ip_from)]
+#![feature(async_closure)]
+#![warn(clippy::expect_used)]
+#![deny(clippy::unwrap_used)]
 
 extern crate html5ever;
 
+use futures_util::StreamExt;
+
 use std::{
-    collections::HashSet, fs::File, io::Read, net::{IpAddr, Ipv4Addr}
+    collections::HashSet,
+    fs::File,
+    io::Read,
+    net::{IpAddr, Ipv4Addr},
 };
 
 use db::{connect, Website};
@@ -11,13 +19,13 @@ use metrics::{counter, gauge};
 use metrics_exporter_prometheus::PrometheusBuilder;
 use serde::Deserialize;
 use surrealdb::{engine::remote::ws::Client, Surreal};
-use tokio::task::JoinSet;
+use tokio::{io::AsyncWriteExt, task::JoinSet};
 use tracing::{debug, error, info, instrument, level_filters::LevelFilter, trace, trace_span};
 use tracing_subscriber::{fmt, layer::SubscriberExt, EnvFilter, Layer, Registry};
 
 mod db;
-mod parser;
 mod filesystem;
+mod parser;
 
 const GET_METRIC: &str = "total_gets";
 const GET_IN_FLIGHT: &str = "gets_in_flight";
@@ -33,11 +41,15 @@ struct Config {
     surreal_password: String,
 
     crawl_filter: String,
+    start_url: String,
     budget: usize,
+    batch_size: usize,
 }
 
 #[tokio::main]
 async fn main() {
+    println!("Logs and metrics are provided to the Grafana dashboard");
+
     let writer = std::fs::OpenOptions::new()
         .append(true)
         .create(true)
@@ -70,8 +82,7 @@ async fn main() {
         .expect("failed to install recorder/exporter");
 
     info!("Starting...");
-    // Would probably take these in as parameters from a cli
-    let starting_url = "https://en.wikipedia.org/";
+
     // When getting uncrawled pages, name must contain this variable. "" will effectively get ignored.
     // let crawl_filter = "en.wikipedia.org/";
     // let budget = 50;
@@ -82,6 +93,7 @@ async fn main() {
     let _ = file.read_to_string(&mut buf);
 
     let config: Config = toml::from_str(&buf).expect("Failed to parse Crawler.toml");
+    let starting_url = &config.start_url;
 
     let db = connect(&config)
         .await
@@ -106,13 +118,8 @@
     let span = trace_span!("Loop");
     let span = span.enter();
     while crawled < config.budget {
-        let get_num = if config.budget - crawled < 100 {
-            config.budget - crawled
-        } else {
-            100
-        };
-
-        let uncrawled = get_uncrawled_links(&db, get_num, config.crawl_filter.clone()).await;
+        let uncrawled =
+            get_uncrawled_links(&db, config.budget - crawled, config.crawl_filter.clone(), &config).await;
         if uncrawled.is_empty() {
             info!("Had more budget but finished crawling everything.");
             return;
@@ -138,6 +145,16 @@ async fn main() {
     }
     drop(span);
 
+    if let Ok(mut ok) = db
+        .query("count(select id from website where crawled = true)")
+        .await
+    {
+        let res = ok.take::<Option<usize>>(0);
+        if let Ok(Some(n)) = res {
+            info!("Total crawled pages now equals {n}");
+        }
+    }
+
     info!("Done");
 }
 
@@ -145,7 +162,6 @@ async fn main() {
 /// Downloads and crawls and stores a webpage.
 /// It is acceptable to clone `db`, `reqwest`, and `s3` because they all use `Arc`s internally. - Noted by Oliver
 async fn process(mut site: Website, db: Surreal<Client>, reqwest: reqwest::Client) {
-
     // METRICS
     trace!("Process: {}", &site.site);
     // Build the request
@@ -157,55 +173,77 @@ async fn process(mut site: Website, db: Surreal<Client>, reqwest: reqwest::Clien
 
     // Send the http request (get)
     if let Ok(response) = request_builder.send().await {
-
-        // METRICS
-        g.decrement(1);
-        counter!(GET_METRIC).increment(1);
 
         // Get body from response
-        let data = response
-            .text()
-            .await
-            .expect("Failed to read http response's body!");
-
-        // Store document
-        filesystem::store(&data, &site.site).await;
-
-        // Parse document and get relationships
-        let sites = parser::parse(&site, &data).await;
-
-        // update self in db
-        site.set_crawled();
-        Website::store_all(vec![site], &db).await;
-
-        // De-duplicate this list
-        let prev_len = sites.len();
-        let set = sites.into_iter().fold(HashSet::new(), |mut set,item| {
-            set.insert(item);
-            set
-        });
-        let de_dupe_sites: Vec<Website> = set.into_iter().collect();
-        let diff = prev_len - de_dupe_sites.len();
-        trace!("Saved {diff} from being entered into the db by de-duping");
-
-        // Store all the other sites so that we can link to them.
-        let _ = Website::store_all(de_dupe_sites, &db).await;
+        let path = filesystem::as_path(&site.site);
+
+        // make sure that the file is good to go
+        if let Some(mut file) = filesystem::init(&path).await {
+            let should_parse = path.to_string_lossy().ends_with(".html");
+            let mut buf: Vec<u8> = Vec::new();
+
+            // stream the response onto the disk
+            let mut stream = response.bytes_stream();
+            while let Some(data) = stream.next().await {
+                match data {
+                    Ok(data) => {
+                        debug!("Writing at: {:?}", path);
+                        let _ = file.write_all(&data).await;
+                        // If we are going to parse this file later, we will save it
+                        // into memory as well as the disk.
+                        if should_parse {
+                            data.iter().for_each(|f| buf.push(*f));
+                        }
+                    },
+                    Err(err) => {
+                        eprintln!("{}", err)
+                    },
+                }
+            }
+
+            if should_parse {
+                // Parse document and get relationships
+                let sites = parser::parse(&site, &buf).await;
+                // De-duplicate this list
+                let prev_len = sites.len();
+                let set = sites.into_iter().fold(HashSet::new(), |mut set, item| {
+                    set.insert(item);
+                    set
+                });
+                let de_dupe_sites: Vec<Website> = set.into_iter().collect();
+                let diff = prev_len - de_dupe_sites.len();
+                trace!("Saved {diff} from being entered into the db by de-duping");
+                // Store all the other sites so that we can link to them.
+                let _ = Website::store_all(de_dupe_sites, &db).await;
+            }
+
+            // METRICS
+            g.decrement(1);
+            counter!(GET_METRIC).increment(1);
+
+            // update self in db
+            site.set_crawled();
+            Website::store_all(vec![site], &db).await;
+        } else {
+            error!("File failed to cooperate: {:?}", path);
+        }
     } else {
         error!("Failed to get: {}", &site.site);
     }
 }
 
 /// Returns uncrawled links
-#[instrument(skip(db))]
+#[instrument(skip(db, config))]
 async fn get_uncrawled_links(
     db: &Surreal<Client>,
     mut count: usize,
     filter: String,
+    config: &Config,
 ) -> Vec<Website> {
-    if count > 100 {
-        count = 100
+    if count > config.batch_size {
+        count = config.batch_size;
     }
 
     debug!("Getting uncrawled links");
 
     let mut response = db
@@ -218,4 +256,3 @@ async fn get_uncrawled_links(
         .take(0)
         .expect("Returned websites couldn't be parsed")
 }
-
```
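The crawl loop's task-spawning code falls outside these hunks, so the following is a hypothetical reconstruction rather than the repo's code: the `JoinSet` import and the doc comment on `process` (cloning `db` and `reqwest` is cheap because both are `Arc`-backed) suggest a per-batch pattern roughly like this:

```rust
use tokio::task::JoinSet;

// Types (Website, Surreal<Client>, process) as in the surrounding file.
async fn crawl_batch(batch: Vec<Website>, db: Surreal<Client>, reqwest: reqwest::Client) {
    let mut set = JoinSet::new();
    for site in batch {
        // Cheap clones: both handles wrap Arcs internally.
        set.spawn(process(site, db.clone(), reqwest.clone()));
    }
    // Drain the batch before fetching the next round of uncrawled links.
    while let Some(res) = set.join_next().await {
        if let Err(err) = res {
            tracing::error!("crawl task failed: {err}");
        }
    }
}
```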
src/parser.rs

```diff
@@ -1,5 +1,4 @@
 use std::default::Default;
-use std::str::FromStr;
 
 use html5ever::tokenizer::{BufferQueue, TokenizerResult};
 use html5ever::tokenizer::{StartTag, TagToken};
@@ -63,29 +62,34 @@ impl TokenSink for Website {
 
 #[instrument(skip_all)]
 /// Parses the passed site and returns all the sites it links to.
-pub async fn parse(site: &Website, data: &str) -> Vec<Website> {
+pub async fn parse(site: &Website, data: &[u8]) -> Vec<Website> {
+    debug!("Parsing {}", site.site.to_string());
     // prep work
     let mut other_sites: Vec<Website> = Vec::new();
 
     // change data into something that can be tokenized
-    let chunk = Tendril::from_str(data).expect("Failed to parse string into Tendril!");
-    // create buffer of tokens and push our input into it
-    let token_buffer = BufferQueue::default();
-    token_buffer.push_back(
-        chunk
-            .try_reinterpret::<fmt::UTF8>()
-            .expect("Failed to reinterpret chunk!"),
-    );
-    // create the tokenizer
-    let tokenizer = Tokenizer::new(site.clone(), TokenizerOpts::default());
+    let s: Result<Tendril<fmt::UTF8>, ()> = Tendril::try_from_byte_slice(data);
+    if let Ok(chunk) = s {
+        // create buffer of tokens and push our input into it
+        let token_buffer = BufferQueue::default();
+        token_buffer.push_back(
+            chunk
+                .try_reinterpret::<fmt::UTF8>()
+                .expect("Failed to reinterpret chunk!"),
+        );
+        // create the tokenizer
+        let tokenizer = Tokenizer::new(site.clone(), TokenizerOpts::default());
 
-    // go thru buffer
-    while let TokenizerResult::Script(mut sites) = tokenizer.feed(&token_buffer) {
-        other_sites.append(&mut sites);
-        // other_sites.push(sites);
+        // go thru buffer
+        while let TokenizerResult::Script(mut sites) = tokenizer.feed(&token_buffer) {
+            other_sites.append(&mut sites);
+            // other_sites.push(sites);
+        }
+        assert!(token_buffer.is_empty());
+        tokenizer.end();
+    } else {
+        warn!("Tendril failed to parse on: {}", site.site.to_string());
     }
-    assert!(token_buffer.is_empty());
-    tokenizer.end();
 
     other_sites
 }
@@ -107,7 +111,7 @@ fn try_get_url(parent: &Url, link: &str) -> Option<Url> {
     match Url::parse(&format!("{scheme}://{}", link)) {
         Ok(url) => Some(url),
         Err(err) => {
-            error!("Failed parsing realative scheme url: {}", err);
+            error!("Failed parsing relative scheme url: {}", err);
             None
         }
     }
@@ -117,10 +121,13 @@ fn try_get_url(parent: &Url, link: &str) -> Option<Url> {
     match e {
         url::ParseError::RelativeUrlWithoutBase => {
             // Is: scheme://host:port
-            let origin = parent.origin().ascii_serialization();
+            let mut origin = parent.origin().ascii_serialization();
+            if !origin.ends_with('/') && !link.starts_with('/') {
+                origin += "/";
+            }
             let url = origin.clone() + link;
 
-            trace!("Built `{url}` from `{origin} + {}`", link.to_string());
+            trace!("Built `{url}` from `{origin} + `{}`", link.to_string());
 
             if let Ok(url) = Url::parse(&url) {
                 trace!("Saved relative url `{}` AS: `{}`", link, url);
```
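The slash guard added to `try_get_url` fixes a concatenation bug: `origin().ascii_serialization()` never carries a trailing slash, so a host-relative link like `page.html` used to fuse into `https://example.compage.html`. A small standalone check of the repaired join (hypothetical helper, not the repo's exact function):

```rust
use url::Url;

fn join_origin(parent: &Url, link: &str) -> String {
    let mut origin = parent.origin().ascii_serialization();
    // Without this, "https://example.com" + "page.html" would fuse
    // into "https://example.compage.html".
    if !origin.ends_with('/') && !link.starts_with('/') {
        origin += "/";
    }
    origin + link
}

fn main() {
    let parent = Url::parse("https://example.com/dir/").expect("valid url");
    assert_eq!(join_origin(&parent, "page.html"), "https://example.com/page.html");
    assert_eq!(join_origin(&parent, "/page.html"), "https://example.com/page.html");
}
```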
1444 changes: src/tlds-alpha-by-domain.txt (new file)

File diff suppressed because it is too large