Compare commits

...

2 Commits

SHA1        Message                                 Date
c08a20ac00  cleanup and more accuratly use metrics  2025-04-15 09:07:16 -06:00
94912e9125  change up how files are discovered      2025-04-15 09:06:57 -06:00
3 changed files with 1492 additions and 62 deletions

View File

@@ -5,7 +5,10 @@ use tracing::{debug, error, instrument, trace, warn};
 use url::Url;
 
 #[instrument(skip(data))]
-pub async fn store(data: &str, url: &Url) {
+/// Returns whether or not the saved file should be parsed.
+/// If the file is just data, like an image, it doesn't need to be parsed.
+/// If it's html, then it does need to be parsed.
+pub async fn store(data: &str, url: &Url) -> bool {
     // extract data from url to save it accurately
     let url_path = PathBuf::from("./downloaded/".to_string() + url.domain().unwrap_or("UnknownDomain") + url.path());
@@ -21,19 +24,20 @@
         (url_path.clone(), "index.html".into())
     };
+    let should_parse = filename.ends_with(".html");
     debug!("Writing at: {:?} {:?}", basepath, filename);
     // create the folders
     if let Err(err) = fs::create_dir_all(&basepath).await {
         error!("Dir creation: {err} {:?}", basepath);
     } else {
-        // FIXME I don't think this handles index.html files well...
-        // TODO this should probably append .html to non-described files
-        // create the file if that was successful
         if let Err(err) = fs::write(&basepath.join(filename), data).await {
             error!("File creation: {err} {:?}", url_path);
         }
     }
+
+    should_parse
 }
 
 fn valid_file_extension(take: &&OsStr) -> bool {
@@ -41,35 +45,14 @@ fn valid_file_extension(take: &&OsStr) -> bool {
     let all = los.split('.');
     match all.last() {
         Some(s) => {
-            match s.to_lowercase().as_str() {
-                "html" => true,
-                "css" => true,
-                "js" => true,
-                "ts" => true,
-                "otf" => true, // font
-                "png" => true,
-                "svg" => true,
-                "jpg" => true,
-                "jpeg" => true,
-                "mp4" => true,
-                "mp3" => true,
-                "webp" => true,
-                "pdf" => true,
-                "json" => true,
-                "xml" => true,
-                // IGNORE
-                // TODO Should this be a list of all domains?
-                "org" => false,
-                "com" => false,
-                "net" => false,
-                _ => {
-                    warn!("Might be forgetting a file extension: {s}");
-                    false
-                }
+            // FIXME it's worth noting that the dumb tlds like .zip are in here,
+            // which could cause problems
+            let all_domains = include_str!("tlds-alpha-by-domain.txt");
+
+            // check if it is a domain
+            match all_domains.lines().map(str::to_lowercase).find(|x| x==s.to_lowercase().as_str()) {
+                Some(_) => false,
+                None => true
             }
         },
         None => false,
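
Note on the new extension check: it treats the last dot-separated segment of a name as a real file extension only if that segment is not a known TLD from the bundled IANA list, replacing the hard-coded allow-list above. A standalone sketch of that idea (helper names here are illustrative, not the repo's, and it assumes tlds-alpha-by-domain.txt sits next to the source file, as the new src/tlds-alpha-by-domain.txt entry below suggests):

    use std::ffi::OsStr;

    // Sketch only: accept the last dot-segment as a file extension unless
    // it matches a TLD from the bundled IANA list.
    fn looks_like_tld(segment: &str) -> bool {
        include_str!("tlds-alpha-by-domain.txt")
            .lines()
            .any(|tld| tld.eq_ignore_ascii_case(segment))
    }

    fn is_file_extension(name: &OsStr) -> bool {
        match name.to_string_lossy().split('.').last() {
            // "example.com" ends in a TLD, so it is a host name, not a file;
            // "page.html" does not, so it counts as a file extension.
            Some(seg) => !looks_like_tld(seg),
            None => false,
        }
    }

As the FIXME in the hunk points out, TLDs such as .zip are in that list, so a genuine archive.zip would be misclassified as a domain under this approach.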

View File

@@ -24,6 +24,8 @@ const GET_IN_FLIGHT: &str = "gets_in_flight";
 const SITES_CRAWLED: &str = "pages_crawled";
 const BEING_PROCESSED: &str = "pages_being_processed";
+const BATCH_SIZE: usize = 2;
 
 #[derive(Deserialize)]
 struct Config {
     surreal_ns: String,
@@ -109,13 +111,7 @@ async fn main() {
     let span = trace_span!("Loop");
     let span = span.enter();
     while crawled < config.budget {
-        let get_num = if config.budget - crawled < 100 {
-            config.budget - crawled
-        } else {
-            100
-        };
-        let uncrawled = get_uncrawled_links(&db, get_num, config.crawl_filter.clone()).await;
+        let uncrawled = get_uncrawled_links(&db, config.budget - crawled, config.crawl_filter.clone()).await;
         if uncrawled.is_empty() {
             info!("Had more budget but finished crawling everything.");
             return;
@@ -170,39 +166,44 @@ async fn process(mut site: Website, db: Surreal<Client>, reqwest: reqwest::Clien
     // Send the http request (get)
     if let Ok(response) = request_builder.send().await {
-        // METRICS
-        g.decrement(1);
-        counter!(GET_METRIC).increment(1);
+        // TODO if this will fail if the object we are downloading is
+        // larger than the memory of the device it's running on.
+        // We should store it *as* we download it then parse it in-place.
 
         // Get body from response
         let data = response
             .text()
             .await
             .expect("Failed to read http response's body!");
-        // Store document
-        filesystem::store(&data, &site.site).await;
 
-        // Parse document and get relationships
-        let sites = parser::parse(&site, &data).await;
+        // METRICS
+        g.decrement(1);
+        counter!(GET_METRIC).increment(1);
+
+        // Store document
+        let should_parse = filesystem::store(&data, &site.site).await;
+
+        if should_parse {
+            // Parse document and get relationships
+            let sites = parser::parse(&site, &data).await;
+
+            // De-duplicate this list
+            let prev_len = sites.len();
+            let set = sites.into_iter().fold(HashSet::new(), |mut set,item| {
+                set.insert(item);
+                set
+            });
+            let de_dupe_sites: Vec<Website> = set.into_iter().collect();
+            let diff = prev_len - de_dupe_sites.len();
+            trace!("Saved {diff} from being entered into the db by de-duping");
+
+            // Store all the other sites so that we can link to them.
+            let _ = Website::store_all(de_dupe_sites, &db).await;
+        }
+
         // update self in db
         site.set_crawled();
         Website::store_all(vec![site], &db).await;
-
-        // De-duplicate this list
-        let prev_len = sites.len();
-        let set = sites.into_iter().fold(HashSet::new(), |mut set,item| {
-            set.insert(item);
-            set
-        });
-        let de_dupe_sites: Vec<Website> = set.into_iter().collect();
-        let diff = prev_len - de_dupe_sites.len();
-        trace!("Saved {diff} from being entered into the db by de-duping");
-
-        // Store all the other sites so that we can link to them.
-        let _ = Website::store_all(de_dupe_sites, &db).await;
     } else {
         error!("Failed to get: {}", &site.site);
     }
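
The TODO at the top of this hunk (objects larger than memory) could eventually be addressed by streaming the body to disk instead of calling .text(); for example, something along these lines using reqwest's bytes_stream (behind its "stream" feature) together with tokio's async file I/O. This is only a sketch of the idea, not part of the commit, and the path handling is illustrative:

    use futures_util::StreamExt; // for .next() on the byte stream
    use tokio::{fs::File, io::AsyncWriteExt};

    // Sketch: write the response body to `path` chunk by chunk so the whole
    // document never has to fit in memory at once.
    async fn stream_to_disk(response: reqwest::Response, path: &std::path::Path) -> std::io::Result<()> {
        let mut file = File::create(path).await?;
        let mut body = response.bytes_stream();
        while let Some(chunk) = body.next().await {
            let chunk = chunk.map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e))?;
            file.write_all(&chunk).await?;
        }
        file.flush().await
    }

Parsing "in-place", as the TODO suggests, would additionally need a streaming HTML parser, which this sketch does not cover.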
@@ -215,9 +216,11 @@ async fn get_uncrawled_links(
     mut count: usize,
     filter: String,
 ) -> Vec<Website> {
-    if count > 100 {
-        count = 100
+    if count > BATCH_SIZE {
+        count = BATCH_SIZE;
     }
     debug!("Getting uncrawled links");
     let mut response = db
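
With BATCH_SIZE replacing the hard-coded 100, this guard is just a clamp on how many uncrawled links a single query may return. An equivalent form using usize::min (a sketch, not the committed code) would be:

    // Never ask the db for more than one batch of links at a time.
    fn clamp_to_batch(count: usize, batch_size: usize) -> usize {
        count.min(batch_size)
    }

so the call site could use clamp_to_batch(count, BATCH_SIZE) instead of mutating count in place.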

src/tlds-alpha-by-domain.txt (new file): 1444 additions

File diff suppressed because it is too large.