Compare commits

12 Commits

SHA1 Message Date
f7a3ca8fd7 changes 2025-08-04 23:02:05 +00:00
6790061e22 helper code 2025-07-09 15:58:22 -06:00
50606bb69e It isnt quite working yet 2025-04-17 09:59:23 -06:00
5850f19cab Merge pull request 'stream_response' (#6) from stream_response into main (Reviewed-on: #6) 2025-04-17 15:39:49 +00:00
2c8546e30a logging cleanup 2025-04-17 09:36:27 -06:00
4e619d0ebc logging cleanup 2025-04-17 09:36:13 -06:00
647c4cd324 work off content-type header 2025-04-17 09:35:57 -06:00
7fab961d76 no longer how this is working 2025-04-17 09:35:26 -06:00
d3fff194f4 logging updates 2025-04-17 08:17:37 -06:00
3497312fd4 de-enshitified file saving logic 2025-04-17 08:17:29 -06:00
0fd76b1734 Merge pull request 'stream_response' (#4) from stream_response into main (Reviewed-on: #4) 2025-04-15 21:23:54 +00:00
9bfa8f9108 batch_size 2025-04-15 13:38:28 -06:00
7 changed files with 94 additions and 1515 deletions

View File

@@ -3,9 +3,10 @@ surreal_url = "localhost:8000"
 surreal_username = "root"
 surreal_password = "root"
 surreal_ns = "test"
-surreal_db = "v1.19.5"
+surreal_db = "v1.21.1"
 # Crawler config
-crawl_filter = "en.wikipedia.org"
-start_url = "https://en.wikipedia.org"
-budget = 100
+crawl_filter = "https://ftpgeoinfo.msl.mt.gov/Data/Spatial/MSDI"
+start_url = "https://ftpgeoinfo.msl.mt.gov/Data/Spatial/MSDI"
+budget = 10000
+batch_size = 50
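
The new batch_size key implies a matching field on the crawler's config struct (get_uncrawled_links reads config.batch_size later in this comparison). As a rough sketch only, assuming a serde + toml setup and a hypothetical Config struct and "Crawler.toml" filename that are not shown in this diff:

use serde::Deserialize;

// Field names follow the TOML keys above; the real struct in the repo may differ.
#[derive(Debug, Deserialize)]
struct Config {
    surreal_url: String,
    surreal_username: String,
    surreal_password: String,
    surreal_ns: String,
    surreal_db: String,
    crawl_filter: String,
    start_url: String,
    budget: usize,
    batch_size: usize,
}

fn load_config() -> Result<Config, Box<dyn std::error::Error>> {
    // "Crawler.toml" is a placeholder path, not the repo's actual filename.
    let raw = std::fs::read_to_string("Crawler.toml")?;
    Ok(toml::from_str(&raw)?)
}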

View File

@@ -7,7 +7,7 @@ scrape_configs:
     static_configs:
       # change this your machine's ip, localhost won't work
       # because localhost refers to the docker container.
-      - targets: ['172.20.239.48:2500']
+      - targets: ['192.168.1.200:2500']
      #- targets: ['192.168.8.209:2500']
   - job_name: loki
     static_configs:

View File

@@ -20,12 +20,18 @@ pub struct Website {
     pub site: Url,
     /// Wether or not this link has been crawled yet
     pub crawled: bool,
+    /// 200, 404, etc
+    pub status_code: u16,
 }
 // manual impl to make tracing look nicer
 impl Debug for Website {
     fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-        f.debug_struct("Website").field("site", &self.site).finish()
+        f.debug_struct("Website")
+            .field("host", &self.site.host())
+            .field("path", &self.site.path())
+            .field("status_code", &self.status_code)
+            .finish()
     }
 }
@@ -38,15 +44,11 @@ impl Website {
         };
         Self {
             crawled,
-            site
+            site,
+            status_code: 0,
         }
     }
-    pub fn set_crawled(&mut self) {
-        trace!("Set crawled to true");
-        self.crawled = true
-    }
     // Insert ever item in the vec into surreal, crawled state will be preserved as TRUE
     // if already in the database as such or incoming data is TRUE.
     #[instrument(skip(db))]
@@ -54,11 +56,13 @@ impl Website {
         counter!(STORE).increment(1);
         let mut things = Vec::with_capacity(all.len());
+        // FIXME failes *sometimes* because "Resource Busy"
         match db
             .query(
                 "INSERT INTO website $array
                     ON DUPLICATE KEY UPDATE
                         accessed_at = time::now(),
+                        status_code = $input.status_code,
                         crawled = crawled OR $input.crawled
                     RETURN VALUE id;
                 ",

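For orientation, a minimal usage sketch of the new status_code field, assuming the Website type above is in scope; the URL, function name, and values are illustrative and not taken from the repo:

use surrealdb::{engine::remote::ws::Client, Surreal};

async fn record_crawl(db: &Surreal<Client>) {
    // Website::new leaves status_code at 0 until the page is actually fetched.
    let mut site = Website::new("https://example.com/", false);
    // After a successful GET the crawler records the outcome (see the main.rs hunks below).
    site.crawled = true;
    site.status_code = 200;
    // The upsert above keeps crawled = true once set and writes the latest status_code.
    let _ = Website::store_all(vec![site], db).await;
}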
View File

@@ -1,30 +1,37 @@
-use std::{ffi::OsStr, io::ErrorKind, path::PathBuf};
+use std::{io::ErrorKind, path::PathBuf};
+use reqwest::header::HeaderValue;
 use tokio::fs;
-use tracing::{error, trace};
+use tracing::{error, trace, warn};
 use url::Url;
-pub fn as_path(url: &Url) -> PathBuf {
+pub fn as_path(url: &Url, content_type: &HeaderValue) -> PathBuf {
     // extract data from url to save it accurately
-    let url_path = PathBuf::from("./downloaded/".to_string() + url.domain().unwrap_or("UnknownDomain") + url.path());
-    // if it's a file
-    let (basepath, filename) = if url_path.extension().filter(valid_file_extension).is_some() {
-        // get everything up till the file
-        let basepath = url_path.ancestors().skip(1).take(1).collect::<PathBuf>();
-        // get the file name
-        let filename = url_path.file_name().expect("This should exist").to_string_lossy();
-        trace!("Save path: {:?} and base path: {:?}", &url_path, &basepath);
-        (basepath, filename.to_string())
+    let mut url_path = PathBuf::from("./downloaded/".to_string() + url.domain().unwrap_or("UnknownDomain") + url.path());
+    if let Ok(header) = content_type.to_str() {
+        // text/html; charset=UTF-8; option=value
+        let ttype = if let Some((t, _)) = header.split_once(';') {
+            t
+        } else {
+            header
+        };
+        if let Some((ttype, subtype)) = ttype.split_once('/') {
+            trace!("Found Content-Type to be: {ttype}/{subtype} for {}", url.to_string());
+            // If the Content-Type header is "*/html" (most likely "text/html") and the path's
+            // extension is anything but html:
+            if subtype=="html" && !url_path.extension().is_some_and(|f| f=="html" || f=="htm" ) {
+                // time to slap a index.html to the end of that path there!
+                url_path = url_path.join("index.html");
+            }
+        }
     } else {
-        (url_path.clone(), "index.html".into())
-    };
-    let mut path = PathBuf::new();
-    path = path.join(basepath);
-    path = path.join(filename);
-    path
+        warn!("Header: {:?} couldn't be parsed into a string!", content_type);
+    }
+    trace!("Final path for {} is: {:?}", url, url_path);
+    url_path
 }
 pub async fn init(filename: &PathBuf) -> Option<fs::File> {
@@ -50,29 +57,10 @@ pub async fn init(filename: &PathBuf) -> Option<fs::File> {
             error!("Couldn't get file's parents: {:?}", &filename);
         }
     } else {
-        error!("File creation: {err} {:?}", filename);
+        error!("File open error: {err} {:?}", filename);
     }
     // we don't care about other errors, we can't/shouldn't fix them
     None
     }
     }
 }
-fn valid_file_extension(take: &&OsStr) -> bool {
-    let los = take.to_string_lossy();
-    let all = los.split('.');
-    match all.last() {
-        Some(s) => {
-            // FIXME it's worth noting that the dumb tlds like .zip are in here,
-            // which could cause problems
-            let all_domains = include_str!("tlds-alpha-by-domain.txt");
-            // check if it is a domain
-            match all_domains.lines().map(str::to_lowercase).find(|x| x==s.to_lowercase().as_str()) {
-                Some(_) => false,
-                None => true
-            }
-        },
-        None => false,
-    }
-}
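
To make the new Content-Type handling concrete, here is a hypothetical test (not part of the diff) for as_path, assuming it stays public and that reqwest and url are available in the test context:

#[cfg(test)]
mod tests {
    use super::as_path;
    use reqwest::header::HeaderValue;
    use url::Url;

    #[test]
    fn html_without_extension_gets_index_html() {
        // An extension-less path served as text/html gains a trailing index.html
        let url = Url::parse("https://example.com/docs/page").expect("valid url");
        let ct = HeaderValue::from_static("text/html; charset=UTF-8");
        assert!(as_path(&url, &ct).ends_with("index.html"));

        // A .pdf path served as application/pdf is left untouched
        let url = Url::parse("https://example.com/files/report.pdf").expect("valid url");
        let ct = HeaderValue::from_static("application/pdf");
        assert!(as_path(&url, &ct).ends_with("report.pdf"));
    }
}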

View File

@@ -1,5 +1,4 @@
 #![feature(ip_from)]
-#![feature(async_closure)]
 #![warn(clippy::expect_used)]
 #![deny(clippy::unwrap_used)]
@@ -19,8 +18,8 @@ use metrics::{counter, gauge};
 use metrics_exporter_prometheus::PrometheusBuilder;
 use serde::Deserialize;
 use surrealdb::{engine::remote::ws::Client, Surreal};
-use tokio::{io::AsyncWriteExt, task::JoinSet};
-use tracing::{debug, error, info, instrument, level_filters::LevelFilter, trace, trace_span};
+use tokio::{io::{AsyncWriteExt, BufWriter}, task::JoinSet};
+use tracing::{debug, debug_span, error, info, instrument, level_filters::LevelFilter, trace, trace_span, warn};
 use tracing_subscriber::{fmt, layer::SubscriberExt, EnvFilter, Layer, Registry};
 mod db;
@@ -173,24 +172,45 @@ async fn process(mut site: Website, db: Surreal<Client>, reqwest: reqwest::Clien
     // Send the http request (get)
     if let Ok(response) = request_builder.send().await {
-        // Get body from response
-        let path = filesystem::as_path(&site.site);
+        let headers = response.headers();
+        let code = response.status();
+        #[allow(non_snake_case)]
+        let CT = headers.get("Content-Type");
+        let ct = headers.get("content-type");
+        let ct = match (CT,ct) {
+            (None, None) => {
+                warn!("Server did not respond with Content-Type header. Url: {} Headers: ({:?})", site.site.to_string(), headers);
+                return
+            },
+            (None, Some(a)) => a,
+            (Some(a), None) => a,
+            (Some(a), Some(_)) => a,
+        };
+        // create filepath (handles / -> /index.html)
+        let path = filesystem::as_path(&site.site, ct);
         // make sure that the file is good to go
-        if let Some(mut file) = filesystem::init(&path).await {
-            let should_parse = path.to_string_lossy().ends_with(".html");
-            let mut buf: Vec<u8> = Vec::new();
+        if let Some(file) = filesystem::init(&path).await {
+            // Get body from response
             // stream the response onto the disk
             let mut stream = response.bytes_stream();
+            let should_parse = path.to_string_lossy().ends_with(".html");
+            let mut writer = BufWriter::new(file);
+            let mut buf: Vec<u8> = Vec::new();
+            // Write file to disk
+            info!("Writing at: {:?}", path);
             while let Some(data) = stream.next().await {
                 match data {
                     Ok(data) => {
-                        debug!("Writing at: {:?}", path);
-                        let _ = file.write_all(&data).await;
+                        let _ = writer.write_all(&data).await;
                         // If we are going to parse this file later, we will save it
                         // into memory as well as the disk.
+                        // We do this because the data here might be incomplete
                         if should_parse {
                             data.iter().for_each(|f| buf.push(*f));
                         }
@@ -200,8 +220,14 @@ async fn process(mut site: Website, db: Surreal<Client>, reqwest: reqwest::Clien
                     },
                 }
             }
+            let _ = writer.flush();
+            // (If needed) Parse the file
             if should_parse {
+                let span = debug_span!("Should Parse");
+                let enter = span.enter();
                 // Parse document and get relationships
                 let sites = parser::parse(&site, &buf).await;
                 // De-duplicate this list
@@ -215,6 +241,8 @@ async fn process(mut site: Website, db: Surreal<Client>, reqwest: reqwest::Clien
                 trace!("Saved {diff} from being entered into the db by de-duping");
                 // Store all the other sites so that we can link to them.
                 let _ = Website::store_all(de_dupe_sites, &db).await;
+                drop(enter);
             }
             // METRICS
@@ -222,11 +250,14 @@ async fn process(mut site: Website, db: Surreal<Client>, reqwest: reqwest::Clien
             counter!(GET_METRIC).increment(1);
             // update self in db
-            site.set_crawled();
-            Website::store_all(vec![site], &db).await;
+            site.crawled = true;
+            site.status_code = code.as_u16();
+            Website::store_all(vec![site.clone()], &db).await;
         } else {
             error!("File failed to cooperate: {:?}", path);
         }
+        trace!("Done processing: {}", &site.site);
     } else {
         error!("Failed to get: {}", &site.site);
     }
@@ -244,7 +275,7 @@ async fn get_uncrawled_links(
         count = config.batch_size;
     }
-    debug!("Getting uncrawled links");
+    debug!("Getting {} uncrawled links", count);
     let mut response = db
         .query("SELECT * FROM website WHERE crawled = false AND site ~ type::string($format) LIMIT $count;")
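
The heart of the process() change is the buffered streaming write: every chunk goes to disk through a BufWriter, and HTML bodies are additionally kept in memory for the parser. Below is a condensed, self-contained sketch of that pattern; the function name save_stream and the futures_util import are assumptions rather than repo code, and reqwest's stream feature is assumed to be enabled:

use futures_util::StreamExt;
use tokio::{fs::File, io::{AsyncWriteExt, BufWriter}};

async fn save_stream(response: reqwest::Response, file: File, should_parse: bool) -> Vec<u8> {
    let mut stream = response.bytes_stream();
    let mut writer = BufWriter::new(file);
    let mut buf: Vec<u8> = Vec::new();
    while let Some(chunk) = stream.next().await {
        if let Ok(data) = chunk {
            // every chunk is written to disk...
            let _ = writer.write_all(&data).await;
            // ...and HTML is also buffered in memory for link extraction later
            if should_parse {
                buf.extend_from_slice(&data);
            }
        }
    }
    // flush is awaited here so buffered bytes reach the file before returning
    let _ = writer.flush().await;
    buf
}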

View File

@@ -39,7 +39,7 @@ impl TokenSink for Website {
     if let Some(mut parsed) = url {
         parsed.set_query(None);
         parsed.set_fragment(None);
-        debug!("Final cleaned URL: `{}`", parsed.to_string());
+        trace!("Final cleaned URL: `{}`", parsed.to_string());
         let web = Website::new(&parsed.to_string(), false);
         links.push(web);
     }
@@ -127,16 +127,15 @@ fn try_get_url(parent: &Url, link: &str) -> Option<Url> {
     }
     let url = origin.clone() + link;
-    trace!("Built `{url}` from `{origin} + `{}`", link.to_string());
     if let Ok(url) = Url::parse(&url) {
-        trace!("Saved relative url `{}` AS: `{}`", link, url);
+        trace!("Built `{url}` from `{origin} + `{}`", link.to_string());
         Some(url)
     } else {
         error!(
-            "Failed to reconstruct a url from relative url: `{}` on site: `{}`",
+            "Failed to reconstruct a url from relative url: `{}` on site: `{}`. Failed url was: {}",
             link,
-            parent.to_string()
+            parent.to_string(),
+            url
         );
         None
     }

File diff suppressed because it is too large