Compare commits
2 commits: a9465dda6e ... c08a20ac00

Commits in this range (SHA1): c08a20ac00, 94912e9125
src/filesystem.rs

@@ -5,7 +5,10 @@ use tracing::{debug, error, instrument, trace, warn};
 use url::Url;
 
 #[instrument(skip(data))]
-pub async fn store(data: &str, url: &Url) {
+/// Returns whether or not the saved file should be parsed.
+/// If the file is just data, like an image, it doesn't need to be parsed.
+/// If it's html, then it does need to be parsed.
+pub async fn store(data: &str, url: &Url) -> bool {
     // extract data from url to save it accurately
     let url_path = PathBuf::from("./downloaded/".to_string() + url.domain().unwrap_or("UnknownDomain") + url.path());
@@ -21,19 +24,20 @@ pub async fn store(data: &str, url: &Url) {
         (url_path.clone(), "index.html".into())
     };
 
+    let should_parse = filename.ends_with(".html");
+
     debug!("Writing at: {:?} {:?}", basepath, filename);
 
     // create the folders
     if let Err(err) = fs::create_dir_all(&basepath).await {
         error!("Dir creation: {err} {:?}", basepath);
     } else {
+        // FIXME I don't think this handles index.html files well...
+        // TODO this should probably append .html to non-described files
         // create the file if that was successful
         if let Err(err) = fs::write(&basepath.join(filename), data).await {
             error!("File creation: {err} {:?}", url_path);
         }
     }
 
+    should_parse
 }
 
 fn valid_file_extension(take: &&OsStr) -> bool {
@@ -41,35 +45,14 @@ fn valid_file_extension(take: &&OsStr) -> bool {
     let all = los.split('.');
     match all.last() {
         Some(s) => {
-            match s.to_lowercase().as_str() {
-                "html" => true,
-                "css" => true,
-                "js" => true,
-                "ts" => true,
-                "otf" => true, // font
-
-                "png" => true,
-                "svg" => true,
-                "jpg" => true,
-                "jpeg" => true,
-                "mp4" => true,
-                "mp3" => true,
-                "webp" => true,
-
-                "pdf" => true,
-                "json" => true,
-                "xml" => true,
-
-                // IGNORE
-                // TODO Should this be a list of all domains?
-                "org" => false,
-                "com" => false,
-                "net" => false,
-
-                _ => {
-                    warn!("Might be forgetting a file extension: {s}");
-                    false
-                }
-            }
+            // FIXME it's worth noting that the dumb tlds like .zip are in here,
+            // which could cause problems
+            let all_domains = include_str!("tlds-alpha-by-domain.txt");
+
+            // check if it is a domain
+            match all_domains.lines().map(str::to_lowercase).find(|x| x==s.to_lowercase().as_str()) {
+                Some(_) => false,
+                None => true
+            }
         },
         None => false,
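For context on the hunk above: valid_file_extension no longer whitelists known extensions; it now treats the final dot-separated segment as a domain (and therefore not a real file extension) whenever that segment appears in the bundled IANA TLD list. A minimal standalone sketch of the lookup, using a hypothetical four-entry inline list in place of tlds-alpha-by-domain.txt:

// Sketch only: the inline list stands in for
// include_str!("tlds-alpha-by-domain.txt"), which holds one TLD per line.
fn is_tld(segment: &str) -> bool {
    let all_domains = "COM\nNET\nORG\nZIP";
    let segment = segment.to_lowercase();
    all_domains.lines().map(str::to_lowercase).any(|tld| tld == segment)
}

fn main() {
    assert!(is_tld("com"));   // "example.com" -> last segment is a TLD, skip it
    assert!(!is_tld("html")); // "index.html" -> a real file extension
}

As the committed FIXME notes, extensions that double as TLDs (like .zip) will be wrongly treated as domains by this approach.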
src/main.rs (41 changes)
@@ -24,6 +24,8 @@ const GET_IN_FLIGHT: &str = "gets_in_flight";
 const SITES_CRAWLED: &str = "pages_crawled";
 const BEING_PROCESSED: &str = "pages_being_processed";
 
+const BATCH_SIZE: usize = 2;
+
 #[derive(Deserialize)]
 struct Config {
     surreal_ns: String,
@@ -109,13 +111,7 @@ async fn main() {
     let span = trace_span!("Loop");
     let span = span.enter();
     while crawled < config.budget {
-        let get_num = if config.budget - crawled < 100 {
-            config.budget - crawled
-        } else {
-            100
-        };
-
-        let uncrawled = get_uncrawled_links(&db, get_num, config.crawl_filter.clone()).await;
+        let uncrawled = get_uncrawled_links(&db, config.budget - crawled, config.crawl_filter.clone()).await;
         if uncrawled.is_empty() {
             info!("Had more budget but finished crawling everything.");
             return;
@@ -170,26 +166,26 @@ async fn process(mut site: Website, db: Surreal<Client>, reqwest: reqwest::Clien
     // Send the http request (get)
     if let Ok(response) = request_builder.send().await {
 
-        // METRICS
-        g.decrement(1);
-        counter!(GET_METRIC).increment(1);
-
+        // TODO if this will fail if the object we are downloading is
+        // larger than the memory of the device it's running on.
+        // We should store it *as* we download it then parse it in-place.
         // Get body from response
         let data = response
            .text()
            .await
            .expect("Failed to read http response's body!");
 
-        // Store document
-        filesystem::store(&data, &site.site).await;
+        // METRICS
+        g.decrement(1);
+        counter!(GET_METRIC).increment(1);
+
+        // Store document
+        let should_parse = filesystem::store(&data, &site.site).await;
+
+        if should_parse {
         // Parse document and get relationships
         let sites = parser::parse(&site, &data).await;
 
-        // update self in db
-        site.set_crawled();
-        Website::store_all(vec![site], &db).await;
-
         // De-duplicate this list
         let prev_len = sites.len();
         let set = sites.into_iter().fold(HashSet::new(), |mut set,item| {
@@ -202,6 +198,11 @@ async fn process(mut site: Website, db: Surreal<Client>, reqwest: reqwest::Clien
 
         // Store all the other sites so that we can link to them.
         let _ = Website::store_all(de_dupe_sites, &db).await;
+        }
+
+        // update self in db
+        site.set_crawled();
+        Website::store_all(vec![site], &db).await;
 
     } else {
         error!("Failed to get: {}", &site.site);
@@ -215,9 +216,11 @@ async fn get_uncrawled_links(
     mut count: usize,
     filter: String,
 ) -> Vec<Website> {
-    if count > 100 {
-        count = 100
+
+    if count > BATCH_SIZE {
+        count = BATCH_SIZE;
     }
 
     debug!("Getting uncrawled links");
 
     let mut response = db
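The clamp added here is just a minimum of two values; a sketch of the equivalent written with usize::min, assuming the BATCH_SIZE constant introduced in this diff:

const BATCH_SIZE: usize = 2;

// Equivalent to the added: if count > BATCH_SIZE { count = BATCH_SIZE; }
fn clamp_to_batch(count: usize) -> usize {
    count.min(BATCH_SIZE)
}

fn main() {
    assert_eq!(clamp_to_batch(100), BATCH_SIZE);
    assert_eq!(clamp_to_batch(1), 1);
}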
src/tlds-alpha-by-domain.txt (new file, 1444 lines)

File diff suppressed because it is too large.
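The TODO added in process() points out that response.text() buffers the entire body in memory before anything reaches disk. A minimal sketch of the streaming alternative it describes, assuming tokio's async fs/io and reqwest's chunk() API; store_streaming is a hypothetical helper, not part of this diff:

use tokio::{fs::File, io::AsyncWriteExt};

// Hypothetical helper: write the body to disk chunk-by-chunk as it
// downloads, instead of buffering the whole thing with .text().
async fn store_streaming(
    mut response: reqwest::Response,
    path: &std::path::Path,
) -> std::io::Result<()> {
    let mut file = File::create(path).await?;
    // chunk() yields the next piece of the body as it arrives
    while let Some(bytes) = response.chunk().await.expect("body read failed") {
        file.write_all(&bytes).await?;
    }
    file.flush().await
}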