Compare commits

No commits in common. "c08a20ac003baeda95f3bb607da9b397f0ffbdc8" and "a9465dda6e32d86a92d0b14e23a1513db9272bf4" have entirely different histories.

3 changed files with 61 additions and 1491 deletions

View File

@@ -5,10 +5,7 @@ use tracing::{debug, error, instrument, trace, warn};
use url::Url;
#[instrument(skip(data))]
/// Returns whether or not the saved file should be parsed.
/// If the file is just data, like an image, it doesn't need to be parsed.
/// If it's html, then it does need to be parsed.
pub async fn store(data: &str, url: &Url) -> bool {
pub async fn store(data: &str, url: &Url) {
// extract data from url to save it accurately
let url_path = PathBuf::from("./downloaded/".to_string() + url.domain().unwrap_or("UnknownDomain") + url.path());
@@ -24,20 +21,19 @@ pub async fn store(data: &str, url: &Url) -> bool {
(url_path.clone(), "index.html".into())
};
let should_parse = filename.ends_with(".html");
debug!("Writing at: {:?} {:?}", basepath, filename);
// create the folders
if let Err(err) = fs::create_dir_all(&basepath).await {
error!("Dir creation: {err} {:?}", basepath);
} else {
// FIXME I don't think this handles index.html files well...
// TODO this should probably append .html to non-described files
// create the file if that was successful
if let Err(err) = fs::write(&basepath.join(filename), data).await {
error!("File creation: {err} {:?}", url_path);
}
}
should_parse
}
fn valid_file_extension(take: &&OsStr) -> bool {
@@ -45,14 +41,35 @@ fn valid_file_extension(take: &&OsStr) -> bool {
let all = los.split('.');
match all.last() {
Some(s) => {
// FIXME it's worth noting that the dumb tlds like .zip are in here,
// which could cause problems
let all_domains = include_str!("tlds-alpha-by-domain.txt");
match s.to_lowercase().as_str() {
"html" => true,
"css" => true,
"js" => true,
"ts" => true,
"otf" => true, // font
// check if it is a domain
match all_domains.lines().map(str::to_lowercase).find(|x| x==s.to_lowercase().as_str()) {
Some(_) => false,
None => true
"png" => true,
"svg" => true,
"jpg" => true,
"jpeg" => true,
"mp4" => true,
"mp3" => true,
"webp" => true,
"pdf" => true,
"json" => true,
"xml" => true,
// IGNORE
// TODO Should this be a list of all domains?
"org" => false,
"com" => false,
"net" => false,
_ => {
warn!("Might be forgetting a file extension: {s}");
false
}
}
},
None => false,
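
One side of this hunk checks the final path segment against the IANA list in tlds-alpha-by-domain.txt (via include_str!) on every call. A minimal sketch of caching that lookup, assuming the file has one TLD per line with optional '#' comment lines; the helper names here are illustrative and not part of this diff:

    use std::collections::HashSet;
    use std::sync::OnceLock;

    // Hypothetical helper: parse tlds-alpha-by-domain.txt once and keep it in a static set.
    fn tld_set() -> &'static HashSet<String> {
        static TLDS: OnceLock<HashSet<String>> = OnceLock::new();
        TLDS.get_or_init(|| {
            include_str!("tlds-alpha-by-domain.txt")
                .lines()
                .filter(|line| !line.starts_with('#'))
                .map(str::to_lowercase)
                .collect()
        })
    }

    // A segment that matches a known TLD is treated as a domain, not a file extension.
    fn looks_like_tld(segment: &str) -> bool {
        tld_set().contains(&segment.to_lowercase())
    }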

View File

@@ -24,8 +24,6 @@ const GET_IN_FLIGHT: &str = "gets_in_flight";
const SITES_CRAWLED: &str = "pages_crawled";
const BEING_PROCESSED: &str = "pages_being_processed";
const BATCH_SIZE: usize = 2;
#[derive(Deserialize)]
struct Config {
surreal_ns: String,
@@ -111,7 +109,13 @@ async fn main() {
let span = trace_span!("Loop");
let span = span.enter();
while crawled < config.budget {
let uncrawled = get_uncrawled_links(&db, config.budget - crawled, config.crawl_filter.clone()).await;
let get_num = if config.budget - crawled < 100 {
config.budget - crawled
} else {
100
};
let uncrawled = get_uncrawled_links(&db, get_num, config.crawl_filter.clone()).await;
if uncrawled.is_empty() {
info!("Had more budget but finished crawling everything.");
return;
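
The capped fetch size computed above is equivalent to clamping the remaining budget with Ord::min. An illustrative one-liner, not part of the diff, assuming budget and crawled are both usize:

    let get_num = (config.budget - crawled).min(100);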
@@ -166,44 +170,39 @@ async fn process(mut site: Website, db: Surreal<Client>, reqwest: reqwest::Clien
// Send the http request (get)
if let Ok(response) = request_builder.send().await {
// TODO this will fail if the object we are downloading is
// larger than the memory of the device it's running on.
// We should store it *as* we download it then parse it in-place.
// METRICS
g.decrement(1);
counter!(GET_METRIC).increment(1);
// Get body from response
let data = response
.text()
.await
.expect("Failed to read http response's body!");
// METRICS
g.decrement(1);
counter!(GET_METRIC).increment(1);
// Store document
let should_parse = filesystem::store(&data, &site.site).await;
filesystem::store(&data, &site.site).await;
if should_parse {
// Parse document and get relationships
let sites = parser::parse(&site, &data).await;
// De-duplicate this list
let prev_len = sites.len();
let set = sites.into_iter().fold(HashSet::new(), |mut set,item| {
set.insert(item);
set
});
let de_dupe_sites: Vec<Website> = set.into_iter().collect();
let diff = prev_len - de_dupe_sites.len();
trace!("Saved {diff} from being entered into the db by de-duping");
// Store all the other sites so that we can link to them.
let _ = Website::store_all(de_dupe_sites, &db).await;
}
// Parse document and get relationships
let sites = parser::parse(&site, &data).await;
// update self in db
site.set_crawled();
Website::store_all(vec![site], &db).await;
// De-duplicate this list
let prev_len = sites.len();
let set = sites.into_iter().fold(HashSet::new(), |mut set,item| {
set.insert(item);
set
});
let de_dupe_sites: Vec<Website> = set.into_iter().collect();
let diff = prev_len - de_dupe_sites.len();
trace!("Saved {diff} from being entered into the db by de-duping");
// Store all the other sites so that we can link to them.
let _ = Website::store_all(de_dupe_sites, &db).await;
} else {
error!("Failed to get: {}", &site.site);
}
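
The TODO at the top of this hunk notes that response.text() buffers the whole body in memory. A minimal sketch of the storing half of the streaming idea it describes, assuming reqwest is built with its "stream" feature and the futures-util crate is available; the function and its parameters are illustrative, not part of the diff:

    use futures_util::StreamExt;
    use tokio::io::AsyncWriteExt;

    // Write the response body to disk chunk by chunk instead of holding it all in memory.
    async fn stream_to_disk(
        response: reqwest::Response,
        path: &std::path::Path,
    ) -> std::io::Result<()> {
        let mut file = tokio::fs::File::create(path).await?;
        let mut body = response.bytes_stream();
        while let Some(chunk) = body.next().await {
            let chunk = chunk.map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e))?;
            file.write_all(&chunk).await?;
        }
        file.flush().await?;
        Ok(())
    }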
@@ -216,11 +215,9 @@ async fn get_uncrawled_links(
mut count: usize,
filter: String,
) -> Vec<Website> {
if count > BATCH_SIZE {
count = BATCH_SIZE;
if count > 100 {
count = 100
}
debug!("Getting uncrawled links");
let mut response = db

File diff suppressed because it is too large