Compare commits

..

30 Commits

SHA1 Message Date
6f98001d8e Merge pull request 'status_codes' (#8) from status_codes into main
Reviewed-on: #8
2025-07-11 00:49:27 +00:00
6790061e22 helper code 2025-07-09 15:58:22 -06:00
50606bb69e It isnt quite working yet 2025-04-17 09:59:23 -06:00
5850f19cab Merge pull request 'stream_response' (#6) from stream_response into main
Reviewed-on: #6
2025-04-17 15:39:49 +00:00
2c8546e30a logging cleanup 2025-04-17 09:36:27 -06:00
4e619d0ebc logging cleanup 2025-04-17 09:36:13 -06:00
647c4cd324 work off content-type header 2025-04-17 09:35:57 -06:00
7fab961d76 no longer how this is working 2025-04-17 09:35:26 -06:00
d3fff194f4 logging updates 2025-04-17 08:17:37 -06:00
3497312fd4 de-enshitified file saving logic 2025-04-17 08:17:29 -06:00
0fd76b1734 Merge pull request 'stream_response' (#4) from stream_response into main
Reviewed-on: #4
2025-04-15 21:23:54 +00:00
9bfa8f9108 batch_size 2025-04-15 13:38:28 -06:00
bdb1094a30 steam data to the disk 2025-04-15 13:07:47 -06:00
9aa2d9ce22 code settings 2025-04-15 13:06:53 -06:00
4b557a923c Merge pull request 'foss_storage' (#3) from foss_storage into main
Reviewed-on: #3
2025-04-15 15:11:59 +00:00
c08a20ac00 cleanup and more accuratly use metrics 2025-04-15 09:07:16 -06:00
94912e9125 change up how files are discovered 2025-04-15 09:06:57 -06:00
a9465dda6e add instructions 2025-03-31 15:05:18 -06:00
add6f00ed6 no recomp needed 2025-03-31 14:53:10 -06:00
4a433a1a77 This function sometimes throws errors, this logging should help 2025-03-31 14:18:37 -06:00
03cbcd9ae0 remove minio code 2025-03-31 14:18:11 -06:00
6fc71c7a78 add speed improvements 2025-03-21 12:14:29 -06:00
96a3ca092a :) 2025-03-21 12:11:05 -06:00
b750d88d48 working filesystem storage 2025-03-21 11:42:43 -06:00
808790a7c3 file patch; 2025-03-21 07:11:51 +00:00
2de01b2a0e remove removed code 2025-03-21 06:48:39 +00:00
be0fd5505b i think the files work better 2025-03-21 06:48:17 +00:00
a23429104c dead code removal 2025-03-21 06:03:34 +00:00
66581cc453 getting there 2025-03-21 05:59:40 +00:00
7df19a480f updates 2025-03-20 15:11:01 -06:00
16 changed files with 387 additions and 448 deletions

3
.gitignore vendored
View File

@ -4,4 +4,5 @@
perf.data
flamegraph.svg
perf.data.old
/docker/logs/*
/docker/logs/*
/downloaded

19
.vscode/launch.json vendored
View File

@ -7,18 +7,15 @@
{
"type": "lldb",
"request": "launch",
"name": "Debug executable 'surreal_spider'",
"env": {
"RUST_LOG": "surreal_spider=trace,reqwest=info",
},
"name": "Debug executable 'internet_mapper'",
"cargo": {
"args": [
"build",
"--bin=surreal_spider",
"--package=surreal_spider"
"--bin=internet_mapper",
"--package=internet_mapper"
],
"filter": {
"name": "surreal_spider",
"name": "internet_mapper",
"kind": "bin"
}
},
@ -28,16 +25,16 @@
{
"type": "lldb",
"request": "launch",
"name": "Debug unit tests in executable 'surreal_spider'",
"name": "Debug unit tests in executable 'internet_mapper'",
"cargo": {
"args": [
"test",
"--no-run",
"--bin=surreal_spider",
"--package=surreal_spider"
"--bin=internet_mapper",
"--package=internet_mapper"
],
"filter": {
"name": "surreal_spider",
"name": "internet_mapper",
"kind": "bin"
}
},

View File

@ -3,6 +3,6 @@
"creds",
"reqwest",
"rustls",
"surql"
"surql",
]
}

1
Cargo.lock generated
View File

@ -1966,6 +1966,7 @@ name = "internet_mapper"
version = "0.1.0"
dependencies = [
"base64 0.22.1",
"futures-util",
"html5ever 0.29.1",
"metrics",
"metrics-exporter-prometheus",

View File

@ -5,12 +5,13 @@ edition = "2021"
[dependencies]
base64 = "0.22.1"
futures-util = "0.3.31"
html5ever = "0.29"
metrics = "0.24.1"
metrics-exporter-prometheus = { version = "0.16.2", features=["http-listener"]}
# minio = "0.1.0"
minio = {git="https://github.com/minio/minio-rs.git", rev = "c28f576"}
reqwest = { version = "0.12", features = ["gzip", "default", "rustls-tls"] }
reqwest = { version = "0.12", features = ["gzip", "default", "rustls-tls", "stream"] }
serde = { version = "1.0", features = ["derive"] }
surrealdb = "2.2"
tokio = { version="1.41.0", features = ["full"] }

View File

@ -3,14 +3,12 @@ surreal_url = "localhost:8000"
surreal_username = "root"
surreal_password = "root"
surreal_ns = "test"
surreal_db = "v1.16"
# Minio config
s3_bucket = "v1.16"
s3_url = "http://localhost:9000"
s3_access_key = "DwJfDDVIbmCmfAblwSqp"
s3_secret_key = "V4UqvC1Vm4AwLE5FAhu2gxlMfvexTBQnDxuy8uZx"
surreal_db = "v1.21.1"
# Crawler config
crawl_filter = "en.wikipedia.com"
# crawl_filter = "https://ftpgeoinfo.msl.mt.gov/Data/Spatial/MSDI/Imagery/2023_NAIP/UTM_County_Mosaics/"
crawl_filter = "https://oliveratkinson.net"
# start_url = "https://ftpgeoinfo.msl.mt.gov/Data/Spatial/MSDI/Imagery/2023_NAIP/UTM_County_Mosaics/"
start_url = "https://oliveratkinson.net"
budget = 1000
batch_size = 500
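
Read together with the `Config` struct in `src/main.rs` further down, the trimmed `Crawler.toml` above maps onto plain struct fields. A minimal sketch of how it is consumed (the field list is assumed from the hunks in this diff — the real struct lives in `src/main.rs`; the `s3_*` fields are gone and `batch_size` is new):

```rust
use serde::Deserialize;

// Assumed to mirror the post-change Config in src/main.rs; the keys match Crawler.toml above.
#[derive(Deserialize)]
struct Config {
    surreal_url: String,
    surreal_username: String,
    surreal_password: String,
    surreal_ns: String,
    surreal_db: String,
    crawl_filter: String,
    start_url: String,
    budget: usize,
    batch_size: usize,
}

fn main() {
    let buf = std::fs::read_to_string("Crawler.toml").expect("Failed to read Crawler.toml");
    let config: Config = toml::from_str(&buf).expect("Failed to parse Crawler.toml");
    println!("start: {}, budget: {}, batch_size: {}", config.start_url, config.budget, config.batch_size);
}
```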

View File

@ -2,14 +2,54 @@
Crawls sites saving all the found links to a surrealdb database. It then proceeds to take batches of 100 uncrawled links until the crawl budget is reached. It saves the data of each site in a minio database.
## How to use
1. Clone the repo and `cd` into it.
2. Build the repo with `cargo build -r`
3. Start the docker containers
1. cd into the docker folder `cd docker`
2. Bring up the docker containers `docker compose up -d`
4. From the project's root, edit the `Crawler.toml` file to your liking.
5. Run with `./target/release/internet_mapper`
You can view stats of the project at `http://<your-ip>:3000/dashboards`
```bash
# Untested script but probably works
git clone https://git.oliveratkinson.net/Oliver/internet_mapper.git
cd internet_mapper
cargo build -r
cd docker
docker compose up -d
cd ..
$EDITOR Crawler.toml
./target/release/internet_mapper
```
### TODO
- [ ] Domain filtering - prevent the crawler from going on alternate versions of wikipedia.
- [x] Domain filtering - prevent the crawler from going on alternate versions of wikipedia.
- [ ] Conditionally save content - based on filename or file contents
- [x] GUI / TUI ? - Grafana
- [x] Better asynchronous getting of the sites. Currently it all happens serially.
- [ ] Allow for storing asynchronously
- [x] Allow for storing asynchronously - dropping the "links to" logic fixes this need
- [x] Control crawler via config file (no recompilation needed)
3/17/25: Took >1hr to crawl 100 pages
3/19/25: Took 20min to crawl 1000 pages
This meant we stored 1000 pages, 142,997 urls, and 1,425,798 links between the two.
3/20/25: Took 5min to crawl 1000 pages
3/21/25: Took 3min to crawl 1000 pages
# About
![Screenshot](/pngs/graphana.png)
3/19/25: Took 20min to crawl 100 pages
This meant we stored 100 pages, 142,997 urls, and 1,425,798 links between the two.
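
The batch/budget behaviour described at the top of the README now comes from `budget` and `batch_size` in `Crawler.toml` rather than a hard-coded 100. A minimal sketch of that clamping (the `next_batch_size` helper name is made up for illustration; the real logic sits in `get_uncrawled_links` in `src/main.rs`):

```rust
// Each round asks for at most one batch, and never more than the remaining budget.
fn next_batch_size(crawled: usize, budget: usize, batch_size: usize) -> usize {
    (budget - crawled).min(batch_size)
}

fn main() {
    // with the values from Crawler.toml above: budget = 1000, batch_size = 500
    assert_eq!(next_batch_size(0, 1000, 500), 500);
    assert_eq!(next_batch_size(900, 1000, 500), 100);
    println!("batching behaves as described");
}
```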

View File

@ -14,22 +14,6 @@ services:
- --pass
- root
- rocksdb:/mydata/database.db
minio:
image: quay.io/minio/minio
ports:
- 9000:9000
- 9001:9001
environment:
- MINIO_ROOT_USER=root
- MINIO_ROOT_PASSWORD=an8charpassword
- MINIO_PROMETHEUS_AUTH_TYPE=public
volumes:
- minio_storage:/data
command:
- server
- /data
- --console-address
- ":9001"
alloy:
image: grafana/alloy:latest
@ -82,4 +66,3 @@ volumes:
grafana_storage:
alloy_storage:
surrealdb_storage:
minio_storage:

View File

@ -8,13 +8,10 @@ scrape_configs:
# change this your machine's ip, localhost won't work
# because localhost refers to the docker container.
- targets: ['172.20.239.48:2500']
#- targets: ['192.168.8.209:2500']
- job_name: loki
static_configs:
- targets: ['loki:3100']
- job_name: prometheus
static_configs:
- targets: ['localhost:9090']
- job_name: minio
metrics_path: /minio/v2/metrics/cluster
static_configs:
- targets: ['minio:9000']

BIN
pngs/graphana.png Normal file (new binary image, 264 KiB, not shown)

186
src/db.rs
View File

@ -1,59 +1,37 @@
use base64::{
alphabet,
engine::{self, general_purpose},
Engine,
};
use metrics::counter;
use serde::{ser::SerializeStruct, Deserialize, Serialize};
use std::{collections::HashSet, fmt::Debug, sync::LazyLock, time::Instant};
use std::fmt::Debug;
use serde::{Deserialize, Serialize};
use surrealdb::{
engine::remote::ws::{Client, Ws},
opt::auth::Root,
sql::Thing,
Error::Api,
Response, Surreal,
Surreal,
};
use tokio::sync::Mutex;
use tracing::{error, instrument, trace, warn};
use tracing::{error, instrument, trace};
use url::Url;
use crate::{Config, Timer};
use crate::Config;
// static LOCK: LazyLock<Arc<Mutex<bool>>> = LazyLock::new(|| Arc::new(Mutex::new(true)));
static LOCK: LazyLock<Mutex<bool>> = LazyLock::new(|| Mutex::new(true));
const STORE: &str = "surql_store_calls";
const CUSTOM_ENGINE: engine::GeneralPurpose =
engine::GeneralPurpose::new(&alphabet::URL_SAFE, general_purpose::NO_PAD);
const TIME_SPENT_ON_LOCK: &'static str = "surql_lock_waiting_ms";
const STORE: &'static str = "surql_store_calls";
const LINK: &'static str = "surql_link_calls";
#[derive(Deserialize, Clone, Hash, Eq, PartialEq)]
#[derive(Serialize, Deserialize, Clone, Eq, PartialEq, Hash)]
pub struct Website {
/// The url that this data is found at
pub site: Url,
/// Wether or not this link has been crawled yet
pub crawled: bool,
}
impl Serialize for Website {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::Serializer,
{
let mut state = serializer.serialize_struct("Website", 2)?;
state.serialize_field("crawled", &self.crawled)?;
// to_string() calls the correct naming of site
state.serialize_field("site", &self.site.to_string())?;
state.end()
}
/// 200, 404, etc
pub status_code: u16,
}
// manual impl to make tracing look nicer
impl Debug for Website {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("Website").field("site", &self.site).finish()
f.debug_struct("Website")
.field("host", &self.site.host())
.field("path", &self.site.path())
.field("status_code", &self.status_code)
.finish()
}
}
@ -64,141 +42,43 @@ impl Website {
Ok(a) => a,
Err(_) => todo!(),
};
Self { crawled, site }
}
pub fn set_crawled(&mut self) {
trace!("Set crawled to true");
self.crawled = true
}
pub fn get_url_as_string(site: &Url) -> String {
let domain = match site.domain() {
Some(s) => s.to_string(),
None => {
warn!(
"Failed to get domain of URL: {}, falling back to 'localhost'",
site.to_string()
);
"localhost".to_string()
}
};
let path = site.path();
domain + path
}
pub fn get_url_as_b64_path(site: &Url) -> String {
let domain = site.domain().unwrap_or("DOMAIN").to_string();
let path = &CUSTOM_ENGINE.encode(site.path());
domain + path
}
#[instrument(skip_all)]
pub async fn links_to(&self, other: Vec<Thing>, db: &Surreal<Client>) {
let len = other.len();
if len == 0 {
return;
Self {
crawled,
site,
status_code: 0,
}
let from = &self.site;
// let to = other.site.to_string();
trace!("Linking {} pages to {from}", other.len());
let msg = format!("Linked {len} pages to {from}");
let timer = Timer::start(&msg);
// prevent the timer from being dropped instantly.
let _ = timer;
counter!(LINK).increment(1);
match db
.query("COUNT(RELATE (SELECT id FROM website WHERE site = $in) -> links_to -> $out)")
.bind(("in", from.clone()))
.bind(("out", other))
.await
{
Ok(mut e) => {
// The relate could technically "fail" (not relate anything), this just means that
// the query was ok.
let _: Response = e;
if let Ok(vec) = e.take(0) {
let _: Vec<usize> = vec;
if let Some(num) = vec.get(0) {
if *num == len {
trace!("Link for {from} OK - {num}/{len}");
return;
} else {
error!(
"Didn't link all the records. {num}/{len}. Surreal response: {:?}",
e
);
return;
}
}
}
error!("Linking request succeeded but couldn't verify the results.");
}
Err(e) => {
error!("{}", e.to_string());
}
}
}
pub async fn store_self(&self, db: &Surreal<Client>) {
counter!(STORE).increment(1);
db.query(
"INSERT INTO website $self
ON DUPLICATE KEY UPDATE
crawled = crawled OR $input.crawled
RETURN VALUE id;
",
)
.await
.expect("Failed to store self");
}
// Insert ever item in the vec into surreal, crawled state will be preserved as TRUE
// if already in the database as such or incoming data is TRUE.
pub async fn store_all(all: HashSet<Self>, db: &Surreal<Client>) -> Vec<Thing> {
// NOTES:
// * all incoming Websites come in as !crawled
// * there are potentially duplicates in all
#[instrument(skip(db))]
pub async fn store_all(all: Vec<Self>, db: &Surreal<Client>) -> Vec<Thing> {
counter!(STORE).increment(1);
let mut things = Vec::with_capacity(all.len());
let now = Instant::now();
let lock = LOCK.lock().await;
counter!(TIME_SPENT_ON_LOCK).increment(now.elapsed().as_millis() as u64);
let mut results = Vec::with_capacity(all.len());
// FIXME failes *sometimes* because "Resource Busy"
match db
.query(
// TODO making this an upsert would make sense, but
// upserts seem to be broken.
//
// Doesn't look like upsert can take in an array, so insert
// it is...
//
"INSERT INTO website $array
ON DUPLICATE KEY UPDATE
last_write = time::now()
RETURN VALUE id;"
,
accessed_at = time::now(),
status_code = $input.status_code,
crawled = crawled OR $input.crawled
RETURN VALUE id;
",
)
.bind(("array", all))
.await
{
Ok(mut id) => match id.take::<Vec<Thing>>(0) {
Ok(mut x) => results.append(&mut x),
Err(err) => {
error!("{:?}", err);
}
Ok(mut x) => things.append(&mut x),
Err(err) => error!("{:?}", err),
},
Err(err) => error!("{:?}", err),
Err(err) => {
error!("{:?}", err);
}
}
drop(lock);
results
things
}
}

66
src/filesystem.rs Normal file
View File

@ -0,0 +1,66 @@
use std::{io::ErrorKind, path::PathBuf};
use reqwest::header::HeaderValue;
use tokio::fs;
use tracing::{error, trace, warn};
use url::Url;
pub fn as_path(url: &Url, content_type: &HeaderValue) -> PathBuf {
// extract data from url to save it accurately
let mut url_path = PathBuf::from("./downloaded/".to_string() + url.domain().unwrap_or("UnknownDomain") + url.path());
if let Ok(header) = content_type.to_str() {
// text/html; charset=UTF-8; option=value
let ttype = if let Some((t, _)) = header.split_once(';') {
t
} else {
header
};
if let Some((ttype, subtype)) = ttype.split_once('/') {
trace!("Found Content-Type to be: {ttype}/{subtype} for {}", url.to_string());
// If the Content-Type header is "*/html" (most likely "text/html") and the path's
// extension is anything but html:
if subtype=="html" && !url_path.extension().is_some_and(|f| f=="html" || f=="htm" ) {
// time to slap a index.html to the end of that path there!
url_path = url_path.join("index.html");
}
}
} else {
warn!("Header: {:?} couldn't be parsed into a string!", content_type);
}
trace!("Final path for {} is: {:?}", url, url_path);
url_path
}
pub async fn init(filename: &PathBuf) -> Option<fs::File> {
let file = async || tokio::fs::OpenOptions::new()
.append(true)
.create(true)
.open(&filename).await;
match file().await {
Ok(ok) => Some(ok),
Err(err) => {
// the file/folder isn't found
if err.kind() == ErrorKind::NotFound {
if let Some(parent ) = &filename.parent() {
// create the folders
if let Err(err) = fs::create_dir_all(&parent).await {
error!("Dir creation: {err} {:?}", filename);
eprintln!("{}", err)
} else if let Ok(ok) = file().await {
return Some(ok);
}
} else {
error!("Couldn't get file's parents: {:?}", &filename);
}
} else {
error!("File open error: {err} {:?}", filename);
}
// we don't care about other errors, we can't/shouldn't fix them
None
}
}
}
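
A rough illustration of what `as_path` produces. The URLs below are hypothetical, and `sketch_as_path` only re-states the logic above in simplified form — it is not the crate's own function:

```rust
use std::path::PathBuf;
use url::Url;

// Simplified restatement of filesystem::as_path: mirror the URL under
// ./downloaded/<domain>/<path>, and append index.html when the server says the
// body is HTML but the path itself has no .html/.htm extension.
fn sketch_as_path(url: &Url, content_type: &str) -> PathBuf {
    let mut path = PathBuf::from(format!(
        "./downloaded/{}{}",
        url.domain().unwrap_or("UnknownDomain"),
        url.path()
    ));
    let mime = content_type.split(';').next().unwrap_or(content_type);
    let is_html = mime.split('/').nth(1) == Some("html");
    if is_html && !path.extension().is_some_and(|e| e == "html" || e == "htm") {
        path = path.join("index.html");
    }
    path
}

fn main() {
    let page = Url::parse("https://oliveratkinson.net/blog/post").expect("valid url");
    // text/html with no extension -> ./downloaded/oliveratkinson.net/blog/post/index.html
    println!("{:?}", sketch_as_path(&page, "text/html; charset=UTF-8"));

    let image = Url::parse("https://oliveratkinson.net/logo.png").expect("valid url");
    // non-html content keeps the path as-is
    println!("{:?}", sketch_as_path(&image, "image/png"));
}
```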

View File

@ -1,29 +1,35 @@
#![feature(ip_from)]
#![warn(clippy::expect_used)]
#![deny(clippy::unwrap_used)]
extern crate html5ever;
use futures_util::StreamExt;
use std::{
collections::HashSet, fs::File, io::Read, net::{IpAddr, Ipv4Addr}, time::Instant
collections::HashSet,
fs::File,
io::Read,
net::{IpAddr, Ipv4Addr},
};
use db::{connect, Website};
use metrics::{counter, gauge};
use metrics_exporter_prometheus::PrometheusBuilder;
use s3::S3;
use serde::Deserialize;
use surrealdb::{engine::remote::ws::Client, Surreal};
use tokio::task::JoinSet;
use tracing::{debug, error, info, instrument, trace, trace_span, warn};
use tokio::{io::{AsyncWriteExt, BufWriter}, task::JoinSet};
use tracing::{debug, debug_span, error, info, instrument, level_filters::LevelFilter, trace, trace_span, warn};
use tracing_subscriber::{fmt, layer::SubscriberExt, EnvFilter, Layer, Registry};
mod db;
mod filesystem;
mod parser;
mod s3;
const GET_METRIC: &'static str = "total_gets";
const GET_IN_FLIGHT: &'static str = "gets_in_flight";
const SITES_CRAWLED: &'static str = "pages_crawled";
const BEING_PROCESSED: &'static str = "pages_being_processed";
const GET_METRIC: &str = "total_gets";
const GET_IN_FLIGHT: &str = "gets_in_flight";
const SITES_CRAWLED: &str = "pages_crawled";
const BEING_PROCESSED: &str = "pages_being_processed";
#[derive(Deserialize)]
struct Config {
@ -33,18 +39,15 @@ struct Config {
surreal_username: String,
surreal_password: String,
s3_url: String,
s3_bucket: String,
s3_access_key: String,
s3_secret_key: String,
crawl_filter: String,
start_url: String,
budget: usize,
batch_size: usize,
}
#[tokio::main]
async fn main() {
let total_runtime = Timer::start("Completed");
println!("Logs and metrics are provided to the Grafana dashboard");
let writer = std::fs::OpenOptions::new()
.append(true)
@ -52,7 +55,9 @@ async fn main() {
.open("./docker/logs/tracing.log")
.expect("Couldn't make log file!");
let filter = EnvFilter::from_default_env();
let filter = EnvFilter::builder()
.with_default_directive(LevelFilter::DEBUG.into())
.from_env_lossy();
let registry = Registry::default().with(
fmt::layer()
@ -75,9 +80,8 @@ async fn main() {
.install()
.expect("failed to install recorder/exporter");
debug!("Starting...");
// Would probably take these in as parameters from a cli
let starting_url = "https://en.wikipedia.org/";
info!("Starting...");
// When getting uncrawled pages, name must contain this variable. "" will effectively get ignored.
// let crawl_filter = "en.wikipedia.org/";
// let budget = 50;
@ -88,13 +92,11 @@ async fn main() {
let _ = file.read_to_string(&mut buf);
let config: Config = toml::from_str(&buf).expect("Failed to parse Crawler.toml");
let starting_url = &config.start_url;
let db = connect(&config)
.await
.expect("Failed to connect to surreal, aborting.");
let s3 = S3::connect(&config)
.await
.expect("Failed to connect to minio, aborting.\n\nThis probably means you need to login to the minio console and get a new access key!\n\n(Probably here) http://localhost:9001/access-keys/new-account\n\n");
let reqwest = reqwest::Client::builder()
// .use_rustls_tls()
@ -107,43 +109,33 @@ async fn main() {
let span = trace_span!("Pre-Loop");
let pre_loop_span = span.enter();
// Download the site
let site = Website::new(&starting_url, false);
process(site, db.clone(), reqwest.clone(), s3.clone()).await;
let site = Website::new(starting_url, false);
process(site, db.clone(), reqwest.clone()).await;
drop(pre_loop_span);
let span = trace_span!("Loop");
let span = span.enter();
while crawled < config.budget {
let get_num = if config.budget - crawled < 100 {
config.budget - crawled
} else {
100
};
let uncrawled = get_uncrawled_links(&db, get_num, config.crawl_filter.clone()).await;
if uncrawled.len() == 0 {
let uncrawled =
get_uncrawled_links(&db, config.budget - crawled, config.crawl_filter.clone(), &config).await;
if uncrawled.is_empty() {
info!("Had more budget but finished crawling everything.");
return;
}
debug!("Crawling {} pages...", uncrawled.len());
let span = trace_span!("Crawling");
let _ = span.enter();
{
let mut futures = JoinSet::new();
for site in uncrawled {
gauge!(BEING_PROCESSED).increment(1);
futures.spawn(process(site, db.clone(), reqwest.clone(), s3.clone()));
futures.spawn(process(site, db.clone(), reqwest.clone()));
// let percent = format!("{:.2}%", (crawled as f32 / budget as f32) * 100f32);
// info!("Crawled {crawled} out of {budget} pages. ({percent})");
}
debug!("Joining {} futures...", futures.len());
let c = counter!(SITES_CRAWLED);
// As futures complete runs code in while block
while let Some(_) = futures.join_next().await {
while futures.join_next().await.is_some() {
c.increment(1);
gauge!(BEING_PROCESSED).decrement(1);
crawled += 1;
@ -152,79 +144,137 @@ async fn main() {
}
drop(span);
debug!("Done");
drop(total_runtime);
if let Ok(mut ok) = db
.query("count(select id from website where crawled = true)")
.await
{
let res = ok.take::<Option<usize>>(0);
if let Ok(Some(n)) = res {
info!("Total crawled pages now equals {n}");
}
}
info!("Done");
}
#[instrument(skip(db, s3, reqwest))]
#[instrument(skip(db, reqwest))]
/// Downloads and crawls and stores a webpage.
/// It is acceptable to clone `db`, `reqwest`, and `s3` because they all use `Arc`s internally. - Noted by Oliver
async fn process(mut site: Website, db: Surreal<Client>, reqwest: reqwest::Client, s3: S3) {
async fn process(mut site: Website, db: Surreal<Client>, reqwest: reqwest::Client) {
// METRICS
trace!("Process: {}", &site.site);
let timer = Timer::start("Built request");
// Build the request
let request_builder = reqwest.get(&site.site.to_string());
// METRICS
timer.stop();
let request_builder = reqwest.get(site.site.to_string());
// METRICS
let g = gauge!(GET_IN_FLIGHT);
g.increment(1);
counter!(GET_METRIC).increment(1);
let timer = Timer::start("Got page");
// Send the http request (get)
if let Ok(response) = request_builder.send().await {
let headers = response.headers();
let code = response.status();
// METRICS
timer.stop();
g.decrement(1);
#[allow(non_snake_case)]
let CT = headers.get("Content-Type");
let ct = headers.get("content-type");
// Get body from response
let data = response
.text()
.await
.expect("Failed to read http response's body!");
// Store document
s3.store(&data, &site.site).await;
let ct = match (CT,ct) {
(None, None) => {
warn!("Server did not respond with Content-Type header. Url: {} Headers: ({:?})", site.site.to_string(), headers);
return
},
(None, Some(a)) => a,
(Some(a), None) => a,
(Some(a), Some(_)) => a,
};
// Parse document and get relationships
let sites = parser::parse(&site, &data).await;
// update self in db
site.set_crawled();
site.store_self(&db).await;
// create filepath (handles / -> /index.html)
let path = filesystem::as_path(&site.site, ct);
// de duplicate this list
let set = sites.iter().fold(HashSet::new(), |mut set, item| {
// TODO seems kinda dumb to clone everything.
set.insert(item.clone());
set
});
trace!("Shrunk items to store from {} to {}", sites.len(), set.len());
// make sure that the file is good to go
if let Some(file) = filesystem::init(&path).await {
// Get body from response
// stream the response onto the disk
let mut stream = response.bytes_stream();
// Store all the other sites so that we can link to them.
let others = Website::store_all(set, &db).await;
let should_parse = path.to_string_lossy().ends_with(".html");
let mut writer = BufWriter::new(file);
let mut buf: Vec<u8> = Vec::new();
// Make the database's links reflect the html links between sites
site.links_to(others, &db).await;
// Write file to disk
info!("Writing at: {:?}", path);
while let Some(data) = stream.next().await {
match data {
Ok(data) => {
let _ = writer.write_all(&data).await;
// If we are going to parse this file later, we will save it
// into memory as well as the disk.
// We do this because the data here might be incomplete
if should_parse {
data.iter().for_each(|f| buf.push(*f));
}
},
Err(err) => {
eprintln!("{}", err)
},
}
}
let _ = writer.flush();
// (If needed) Parse the file
if should_parse {
let span = debug_span!("Should Parse");
let enter = span.enter();
// Parse document and get relationships
let sites = parser::parse(&site, &buf).await;
// De-duplicate this list
let prev_len = sites.len();
let set = sites.into_iter().fold(HashSet::new(), |mut set, item| {
set.insert(item);
set
});
let de_dupe_sites: Vec<Website> = set.into_iter().collect();
let diff = prev_len - de_dupe_sites.len();
trace!("Saved {diff} from being entered into the db by de-duping");
// Store all the other sites so that we can link to them.
let _ = Website::store_all(de_dupe_sites, &db).await;
drop(enter);
}
// METRICS
g.decrement(1);
counter!(GET_METRIC).increment(1);
// update self in db
site.crawled = true;
site.status_code = code.as_u16();
Website::store_all(vec![site.clone()], &db).await;
} else {
error!("File failed to cooperate: {:?}", path);
}
trace!("Done processing: {}", &site.site);
} else {
error!("Failed to get: {}", &site.site);
}
}
/// Returns uncrawled links
#[instrument(skip(db))]
#[instrument(skip(db, config))]
async fn get_uncrawled_links(
db: &Surreal<Client>,
mut count: usize,
filter: String,
config: &Config,
) -> Vec<Website> {
if count > 100 {
count = 100
if count > config.batch_size {
count = config.batch_size;
}
debug!("Getting uncrawled links");
let mut response = db
@ -237,34 +287,3 @@ async fn get_uncrawled_links(
.take(0)
.expect("Returned websites couldn't be parsed")
}
pub struct Timer<'a> {
start: Instant,
msg: &'a str,
}
impl<'a> Timer<'a> {
#[inline]
pub fn start(msg: &'a str) -> Self {
Self {
start: Instant::now(),
msg,
}
}
pub fn stop(&self) -> f64 {
let dif = self.start.elapsed().as_micros();
let ms = dif as f64 / 1000.;
if ms > 200. {
warn!("{}", format!("{} in {:.3}ms", self.msg, ms));
}
ms
}
}
impl Drop for Timer<'_> {
fn drop(&mut self) {
self.stop();
}
}
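
Stripped of metrics and logging, the new download path introduced in `process` above boils down to the following sketch. It is a simplification, not the exact code: `download` is a made-up helper name and error handling is reduced to `expect`:

```rust
use futures_util::StreamExt;
use tokio::io::{AsyncWriteExt, BufWriter};

// Stream the HTTP body straight to disk, and only keep a copy in memory when the
// file is HTML and will be parsed for links afterwards.
async fn download(
    client: &reqwest::Client,
    url: &str,
    path: &std::path::Path,
    should_parse: bool,
) -> Vec<u8> {
    let response = client.get(url).send().await.expect("request failed");
    // bytes_stream() needs reqwest's "stream" feature, added in Cargo.toml above
    let mut stream = response.bytes_stream();
    let file = tokio::fs::File::create(path).await.expect("couldn't create file");
    let mut writer = BufWriter::new(file);
    let mut buf = Vec::new();
    while let Some(chunk) = stream.next().await {
        let chunk = chunk.expect("stream error");
        writer.write_all(&chunk).await.expect("write failed");
        if should_parse {
            buf.extend_from_slice(&chunk);
        }
    }
    writer.flush().await.expect("flush failed");
    buf
}

#[tokio::main]
async fn main() {
    // hypothetical usage
    let client = reqwest::Client::new();
    let path = std::path::Path::new("./downloaded/index.html");
    let body = download(&client, "https://oliveratkinson.net", path, true).await;
    println!("downloaded {} bytes", body.len());
}
```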

View File

@ -1,23 +1,24 @@
use std::default::Default;
use std::str::FromStr;
use html5ever::tokenizer::{BufferQueue, TokenizerResult};
use html5ever::tokenizer::{StartTag, TagToken};
use html5ever::tokenizer::{Token, TokenSink, TokenSinkResult, Tokenizer, TokenizerOpts};
use html5ever::{local_name, tendril::*};
use tracing::instrument;
use tracing::{debug, error, instrument, trace, warn};
use url::Url;
use crate::db::Website;
use crate::Timer;
impl TokenSink for Website {
type Handle = Vec<Website>;
#[instrument(skip(token, _line_number))]
fn process_token(&self, token: Token, _line_number: u64) -> TokenSinkResult<Self::Handle> {
match token {
TagToken(tag) => {
if tag.kind == StartTag {
match tag.name {
// this should be all the html elements that have links
local_name!("a")
| local_name!("audio")
| local_name!("area")
@ -32,23 +33,18 @@ impl TokenSink for Website {
let attr_name = attr.name.local.to_string();
if attr_name == "src" || attr_name == "href" || attr_name == "data"
{
// Get clone of the current site object
let mut web = self.clone();
trace!("Found `{}` in html `{}` tag", &attr.value, tag.name);
let url = try_get_url(&self.site, &attr.value);
// Set url
let mut url = web.site;
url.set_fragment(None); // removes #xyz
let joined = url
.join(&attr.value)
.expect("Failed to join url during parsing!");
web.site = joined;
web.crawled = false;
links.push(web);
if let Some(mut parsed) = url {
parsed.set_query(None);
parsed.set_fragment(None);
trace!("Final cleaned URL: `{}`", parsed.to_string());
let web = Website::new(&parsed.to_string(), false);
links.push(web);
}
}
}
return TokenSinkResult::Script(links);
}
local_name!("button") | local_name!("meta") | local_name!("iframe") => {
@ -66,30 +62,90 @@ impl TokenSink for Website {
#[instrument(skip_all)]
/// Parses the passed site and returns all the sites it links to.
pub async fn parse(site: &Website, data: &str) -> Vec<Website> {
pub async fn parse(site: &Website, data: &[u8]) -> Vec<Website> {
debug!("Parsing {}", site.site.to_string());
// prep work
let mut other_sites: Vec<Website> = Vec::new();
let _t = Timer::start("Parsed page");
// change data into something that can be tokenized
let chunk = Tendril::from_str(&data).expect("Failed to parse string into Tendril!");
// create buffer of tokens and push our input into it
let mut token_buffer = BufferQueue::default();
token_buffer.push_back(
chunk
.try_reinterpret::<fmt::UTF8>()
.expect("Failed to reinterprt chunk!"),
);
// create the tokenizer
let tokenizer = Tokenizer::new(site.clone(), TokenizerOpts::default());
let s: Result<Tendril<fmt::UTF8>, ()> = Tendril::try_from_byte_slice(data);
if let Ok(chunk) = s {
// create buffer of tokens and push our input into it
let token_buffer = BufferQueue::default();
token_buffer.push_back(
chunk
.try_reinterpret::<fmt::UTF8>()
.expect("Failed to reinterpret chunk!"),
);
// create the tokenizer
let tokenizer = Tokenizer::new(site.clone(), TokenizerOpts::default());
// go thru buffer
while let TokenizerResult::Script(mut sites) = tokenizer.feed(&mut token_buffer) {
other_sites.append(&mut sites);
// other_sites.push(sites);
// go thru buffer
while let TokenizerResult::Script(mut sites) = tokenizer.feed(&token_buffer) {
other_sites.append(&mut sites);
// other_sites.push(sites);
}
assert!(token_buffer.is_empty());
tokenizer.end();
} else {
warn!("Tendril failed to parse on: {}", site.site.to_string());
}
assert!(token_buffer.is_empty());
tokenizer.end();
other_sites
}
#[instrument]
fn try_get_url(parent: &Url, link: &str) -> Option<Url> {
match Url::parse(link) {
Ok(ok) => Some(ok),
Err(e) => {
if link.starts_with('#') {
trace!("Rejecting # url");
None
} else if link.starts_with("//") {
// if a url starts with "//" it is assumed that it will adopt
// the same scheme as its parent
// https://stackoverflow.com/questions/9646407/two-forward-slashes-in-a-url-src-href-attribute
let scheme = parent.scheme();
match Url::parse(&format!("{scheme}://{}", link)) {
Ok(url) => Some(url),
Err(err) => {
error!("Failed parsing relative scheme url: {}", err);
None
}
}
} else {
// This is some sort of relative url, gonna try patching it up into an absolute
// url
match e {
url::ParseError::RelativeUrlWithoutBase => {
// Is: scheme://host:port
let mut origin = parent.origin().ascii_serialization();
if !origin.ends_with('/') && !link.starts_with('/') {
origin += "/";
}
let url = origin.clone() + link;
if let Ok(url) = Url::parse(&url) {
trace!("Built `{url}` from `{origin} + `{}`", link.to_string());
Some(url)
} else {
error!(
"Failed to reconstruct a url from relative url: `{}` on site: `{}`. Failed url was: {}",
link,
parent.to_string(),
url
);
None
}
}
_ => {
error!("MISC error: {:?} {:?}", e, link);
None
}
}
}
}
}
}
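
To make the branches of `try_get_url` concrete, here is a small sketch with hypothetical links (`cdn.example.net` and `/about` are invented examples; the caller in `process_token` additionally strips queries and fragments before building a `Website`):

```rust
use url::Url;

fn main() {
    let parent = Url::parse("https://oliveratkinson.net/blog/post").expect("valid url");

    // 1. pure-fragment links such as "#top" are rejected outright by try_get_url

    // 2. scheme-relative links ("//host/path") inherit the parent's scheme
    let link = "//cdn.example.net/app.js";
    let absolute = Url::parse(&format!("{}:{}", parent.scheme(), link)).expect("valid url");
    assert_eq!(absolute.as_str(), "https://cdn.example.net/app.js");

    // 3. other relative links are rebuilt from the parent's origin (scheme://host:port)
    let origin = parent.origin().ascii_serialization();
    let rebuilt = Url::parse(&format!("{origin}/about")).expect("valid url");
    assert_eq!(rebuilt.as_str(), "https://oliveratkinson.net/about");

    println!("all branches resolved to absolute urls");
}
```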

101
src/s3.rs
View File

@ -1,101 +0,0 @@
use metrics::counter;
use minio::s3::{
args::{BucketExistsArgs, MakeBucketArgs},
client::ClientBuilder,
creds::StaticProvider,
error::Error,
http::BaseUrl,
Client,
};
use tracing::{instrument, trace, warn};
use url::Url;
use crate::{db::Website, Config, Timer};
const S3_ROUND_TRIP_METRIC: &'static str = "s3_trips";
#[derive(Clone)]
pub struct S3 {
bucket_name: String,
client: Client,
}
impl S3 {
#[instrument(skip_all, name = "S3")]
pub async fn connect(config: &Config) -> Result<Self, Error> {
let base_url = config
.s3_url
.parse::<BaseUrl>()
.expect("Failed to parse url into BaseUrl");
let static_provider =
StaticProvider::new(&config.s3_access_key, &config.s3_secret_key, None);
let client = ClientBuilder::new(base_url)
.provider(Some(Box::new(static_provider)))
.build()?;
trace!("Checking bucket...");
let exists = client
.bucket_exists(
&BucketExistsArgs::new(&config.s3_bucket)
.expect("Failed to check if bucket exists"),
)
.await?;
counter!(S3_ROUND_TRIP_METRIC).increment(1);
if !exists {
trace!("Creating bucket...");
client
.make_bucket(
&MakeBucketArgs::new(&config.s3_bucket).expect("Failed to create bucket!"),
)
.await?;
}
counter!(S3_ROUND_TRIP_METRIC).increment(1);
trace!("Connection successful");
Ok(Self {
bucket_name: config.s3_bucket.to_owned(),
client,
})
}
#[instrument(name = "s3_store", skip_all)]
pub async fn store(&self, data: &str, url: &Url) {
let counter = counter!(S3_ROUND_TRIP_METRIC);
let t = Timer::start("Stored page");
let _ = t; // prevent compiler drop
let filename = Website::get_url_as_string(url);
trace!("Storing {} as {filename}", url.to_string());
counter.increment(1);
let _ = match &self
.client
.put_object_content(&self.bucket_name, &filename, data.to_owned())
.send()
.await
{
Ok(_) => {}
Err(err) => match err {
Error::InvalidObjectName(_) => {
// This code will really only run if the url has non-english chars
warn!("Tried storing invalid object name, retrying with Base64 encoding. Last try.");
let filename: String = Website::get_url_as_b64_path(url);
counter.increment(1);
let _ = &self
.client
.put_object_content(&self.bucket_name, &filename, data.to_owned())
.send()
.await
.unwrap();
}
_ => {}
},
};
}
}

View File

@ -5,4 +5,5 @@ DEFINE INDEX IF NOT EXISTS idx ON TABLE website COLUMNS site UNIQUE;
DEFINE FIELD IF NOT EXISTS crawled ON TABLE website TYPE bool;
DEFINE FIELD IF NOT EXISTS created ON TABLE website VALUE time::now();
DEFINE FIELD IF NOT EXISTS accessed_at ON TABLE website VALUE time::now();
DEFINE FIELD IF NOT EXISTS first_accessed_at ON TABLE website VALUE time::now();