stream data to the disk

Rushmore75 2025-04-15 13:07:47 -06:00
parent 9aa2d9ce22
commit bdb1094a30
5 changed files with 138 additions and 89 deletions

Cargo.lock generated
View File

@@ -1966,6 +1966,7 @@ name = "internet_mapper"
version = "0.1.0"
dependencies = [
"base64 0.22.1",
"futures-util",
"html5ever 0.29.1",
"metrics",
"metrics-exporter-prometheus",

View File

@@ -5,12 +5,13 @@ edition = "2021"
[dependencies]
base64 = "0.22.1"
futures-util = "0.3.31"
html5ever = "0.29"
metrics = "0.24.1"
metrics-exporter-prometheus = { version = "0.16.2", features=["http-listener"]}
# minio = "0.1.0"
minio = {git="https://github.com/minio/minio-rs.git", rev = "c28f576"}
reqwest = { version = "0.12", features = ["gzip", "default", "rustls-tls"] }
reqwest = { version = "0.12", features = ["gzip", "default", "rustls-tls", "stream"] }
serde = { version = "1.0", features = ["derive"] }
surrealdb = "2.2"
tokio = { version="1.41.0", features = ["full"] }

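For context, the two Cargo.toml changes above (adding futures-util and enabling reqwest's "stream" feature) are what make chunked downloads possible. A minimal sketch of that pattern, with illustrative names and paths that are not part of this commit:

use futures_util::StreamExt;
use tokio::{fs::File, io::AsyncWriteExt};

// Stream a response body to disk chunk-by-chunk instead of buffering it all
// in memory with `.text()`.
async fn download_to_disk(
    client: &reqwest::Client,
    url: &str,
    path: &str,
) -> Result<(), Box<dyn std::error::Error>> {
    let response = client.get(url).send().await?;
    let mut file = File::create(path).await?;
    // `bytes_stream()` needs reqwest's "stream" feature;
    // `next()` comes from futures_util::StreamExt.
    let mut stream = response.bytes_stream();
    while let Some(chunk) = stream.next().await {
        file.write_all(&chunk?).await?;
    }
    Ok(())
}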
View File

@@ -1,14 +1,10 @@
use std::{ffi::OsStr, path::PathBuf};
use std::{ffi::OsStr, io::ErrorKind, path::PathBuf};
use tokio::fs;
use tracing::{debug, error, instrument, trace, warn};
use tracing::{error, trace};
use url::Url;
#[instrument(skip(data))]
/// Returns whether or not the saved file should be parsed.
/// If the file is just data, like an image, it doesn't need to be parsed.
/// If it's html, then it does need to be parsed.
pub async fn store(data: &str, url: &Url) -> bool {
pub fn as_path(url: &Url) -> PathBuf {
// extract data from url to save it accurately
let url_path = PathBuf::from("./downloaded/".to_string() + url.domain().unwrap_or("UnknownDomain") + url.path());
@@ -24,20 +20,42 @@ pub async fn store(data: &str, url: &Url) -> bool {
(url_path.clone(), "index.html".into())
};
let should_parse = filename.ends_with(".html");
let mut path = PathBuf::new();
path = path.join(basepath);
path = path.join(filename);
debug!("Writing at: {:?} {:?}", basepath, filename);
path
}
pub async fn init(filename: &PathBuf) -> Option<fs::File> {
let file = async || tokio::fs::OpenOptions::new()
.append(true)
.create(true)
.open(&filename).await;
match file().await {
Ok(ok) => Some(ok),
Err(err) => {
// the file/folder isn't found
if err.kind() == ErrorKind::NotFound {
if let Some(parent) = &filename.parent() {
// create the folders
if let Err(err) = fs::create_dir_all(&basepath).await {
error!("Dir creation: {err} {:?}", basepath);
if let Err(err) = fs::create_dir_all(&parent).await {
error!("Dir creation: {err} {:?}", filename);
eprintln!("{}", err)
} else if let Ok(ok) = file().await {
return Some(ok);
}
} else {
if let Err(err) = fs::write(&basepath.join(filename), data).await {
error!("File creation: {err} {:?}", url_path);
error!("Couldn't get file's parents: {:?}", &filename);
}
} else {
error!("File creation: {err} {:?}", filename);
}
// we don't care about other errors, we can't/shouldn't fix them
None
}
}
should_parse
}
fn valid_file_extension(take: &&OsStr) -> bool {

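In short, the old store() is split in two: as_path() maps a URL onto ./downloaded/<domain>/<path>, and init() opens that file in append/create mode, creating the parent directories if the first open fails with NotFound. A rough caller sketch (illustrative only; the helper name is made up, and it mirrors how main.rs drives the module):

use tokio::io::AsyncWriteExt;
use url::Url;

// Assumed to live inside this crate, where `mod filesystem;` is in scope.
async fn write_chunk(url: &Url, chunk: &[u8]) -> Option<()> {
    let path = filesystem::as_path(url);
    // `init` returns None when the file (or its parent directories)
    // can't be created.
    let mut file = filesystem::init(&path).await?;
    file.write_all(chunk).await.ok()?;
    Some(())
}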
View File

@@ -1,9 +1,17 @@
#![feature(ip_from)]
#![feature(async_closure)]
#![warn(clippy::expect_used)]
#![deny(clippy::unwrap_used)]
extern crate html5ever;
use futures_util::StreamExt;
use std::{
collections::HashSet, fs::File, io::Read, net::{IpAddr, Ipv4Addr}
collections::HashSet,
fs::File,
io::Read,
net::{IpAddr, Ipv4Addr},
};
use db::{connect, Website};
@@ -11,21 +19,19 @@ use metrics::{counter, gauge};
use metrics_exporter_prometheus::PrometheusBuilder;
use serde::Deserialize;
use surrealdb::{engine::remote::ws::Client, Surreal};
use tokio::task::JoinSet;
use tokio::{io::AsyncWriteExt, task::JoinSet};
use tracing::{debug, error, info, instrument, level_filters::LevelFilter, trace, trace_span};
use tracing_subscriber::{fmt, layer::SubscriberExt, EnvFilter, Layer, Registry};
mod db;
mod parser;
mod filesystem;
mod parser;
const GET_METRIC: &str = "total_gets";
const GET_IN_FLIGHT: &str = "gets_in_flight";
const SITES_CRAWLED: &str = "pages_crawled";
const BEING_PROCESSED: &str = "pages_being_processed";
const BATCH_SIZE: usize = 2;
#[derive(Deserialize)]
struct Config {
surreal_ns: String,
@@ -37,6 +43,7 @@ struct Config {
crawl_filter: String,
start_url: String,
budget: usize,
batch_size: usize,
}
#[tokio::main]
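The new batch_size field replaces the old hard-coded BATCH_SIZE constant, so the crawl batch size becomes a deployment setting. A hypothetical excerpt, assuming the Config struct is deserialized from a TOML file (the config file itself is not part of this diff); all values are placeholders:

crawl_filter = "example.com"
start_url = "https://example.com"
budget = 1000
# replaces `const BATCH_SIZE: usize = 2;`
batch_size = 2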
@@ -111,7 +118,8 @@ async fn main() {
let span = trace_span!("Loop");
let span = span.enter();
while crawled < config.budget {
let uncrawled = get_uncrawled_links(&db, config.budget - crawled, config.crawl_filter.clone()).await;
let uncrawled =
get_uncrawled_links(&db, config.budget - crawled, config.crawl_filter.clone(), &config).await;
if uncrawled.is_empty() {
info!("Had more budget but finished crawling everything.");
return;
@@ -137,14 +145,15 @@ async fn main() {
}
drop(span);
if let Ok(mut ok) = db.query("count(select id from website where crawled = true)").await {
if let Ok(mut ok) = db
.query("count(select id from website where crawled = true)")
.await
{
let res = ok.take::<Option<usize>>(0);
if let Ok(i) = res {
if let Some(n) = i {
if let Ok(Some(n)) = res {
info!("Total crawled pages now equals {n}");
}
}
}
info!("Done");
}
@@ -153,7 +162,6 @@ async fn main() {
/// Downloads, crawls, and stores a webpage.
/// It is acceptable to clone `db`, `reqwest`, and `s3` because they all use `Arc`s internally. - Noted by Oliver
async fn process(mut site: Website, db: Surreal<Client>, reqwest: reqwest::Client) {
// METRICS
trace!("Process: {}", &site.site);
// Build the request
@@ -165,27 +173,37 @@ async fn process(mut site: Website, db: Surreal<Client>, reqwest: reqwest::Clien
// Send the http request (get)
if let Ok(response) = request_builder.send().await {
// TODO: this will fail if the object we are downloading is
// larger than the memory of the device it's running on.
// We should store it *as* we download it then parse it in-place.
// Get body from response
let data = response
.text()
.await
.expect("Failed to read http response's body!");
// METRICS
g.decrement(1);
counter!(GET_METRIC).increment(1);
let path = filesystem::as_path(&site.site);
// Store document
let should_parse = filesystem::store(&data, &site.site).await;
// make sure that the file is good to go
if let Some(mut file) = filesystem::init(&path).await {
let should_parse = path.to_string_lossy().ends_with(".html");
let mut buf: Vec<u8> = Vec::new();
// stream the response onto the disk
let mut stream = response.bytes_stream();
while let Some(data) = stream.next().await {
match data {
Ok(data) => {
debug!("Writing at: {:?}", path);
let _ = file.write_all(&data).await;
// If we are going to parse this file later, we will save it
// into memory as well as the disk.
if should_parse {
data.iter().for_each(|f| buf.push(*f));
}
},
Err(err) => {
eprintln!("{}", err)
},
}
}
if should_parse {
// Parse document and get relationships
let sites = parser::parse(&site, &data).await;
let sites = parser::parse(&site, &buf).await;
// De-duplicate this list
let prev_len = sites.len();
let set = sites.into_iter().fold(HashSet::new(), |mut set, item| {
@@ -195,30 +213,35 @@ async fn process(mut site: Website, db: Surreal<Client>, reqwest: reqwest::Clien
let de_dupe_sites: Vec<Website> = set.into_iter().collect();
let diff = prev_len - de_dupe_sites.len();
trace!("Saved {diff} from being entered into the db by de-duping");
// Store all the other sites so that we can link to them.
let _ = Website::store_all(de_dupe_sites, &db).await;
}
// METRICS
g.decrement(1);
counter!(GET_METRIC).increment(1);
// update self in db
site.set_crawled();
Website::store_all(vec![site], &db).await;
} else {
error!("File failed to cooperate: {:?}", path);
}
} else {
error!("Failed to get: {}", &site.site);
}
}
/// Returns uncrawled links
#[instrument(skip(db))]
#[instrument(skip(db, config))]
async fn get_uncrawled_links(
db: &Surreal<Client>,
mut count: usize,
filter: String,
config: &Config,
) -> Vec<Website> {
if count > BATCH_SIZE {
count = BATCH_SIZE;
if count > config.batch_size {
count = config.batch_size;
}
debug!("Getting uncrawled links");
@@ -233,4 +256,3 @@ async fn get_uncrawled_links(
.take(0)
.expect("Returned websites couldn't be parsed")
}

View File

@@ -1,5 +1,4 @@
use std::default::Default;
use std::str::FromStr;
use html5ever::tokenizer::{BufferQueue, TokenizerResult};
use html5ever::tokenizer::{StartTag, TagToken};
@@ -63,12 +62,14 @@ impl TokenSink for Website {
#[instrument(skip_all)]
/// Parses the passed site and returns all the sites it links to.
pub async fn parse(site: &Website, data: &str) -> Vec<Website> {
pub async fn parse(site: &Website, data: &[u8]) -> Vec<Website> {
debug!("Parsing {}", site.site.to_string());
// prep work
let mut other_sites: Vec<Website> = Vec::new();
// change data into something that can be tokenized
let chunk = Tendril::from_str(data).expect("Failed to parse string into Tendril!");
let s: Result<Tendril<fmt::UTF8>, ()> = Tendril::try_from_byte_slice(data);
if let Ok(chunk) = s {
// create buffer of tokens and push our input into it
let token_buffer = BufferQueue::default();
token_buffer.push_back(
@@ -86,6 +87,9 @@ pub async fn parse(site: &Website, data: &str) -> Vec<Website> {
}
assert!(token_buffer.is_empty());
tokenizer.end();
} else {
warn!("Tendril failed to parse on: {}", site.site.to_string());
}
other_sites
}
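Because parse() now receives raw bytes instead of a &str, the Tendril conversion can fail, which is what the new warn! branch above handles. A minimal sketch of the difference, assuming the tendril types re-exported through html5ever:

use html5ever::tendril::{fmt, Tendril};

// `try_from_byte_slice` validates the bytes as UTF-8 and returns Err(()) on
// failure, so a page with a broken encoding is skipped with a warning instead
// of panicking the way `Tendril::from_str(..).expect(..)` did.
fn bytes_to_chunk(data: &[u8]) -> Option<Tendril<fmt::UTF8>> {
    match Tendril::try_from_byte_slice(data) {
        Ok(chunk) => Some(chunk),
        Err(()) => None,
    }
}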
@@ -107,7 +111,7 @@ fn try_get_url(parent: &Url, link: &str) -> Option<Url> {
match Url::parse(&format!("{scheme}://{}", link)) {
Ok(url) => Some(url),
Err(err) => {
error!("Failed parsing realative scheme url: {}", err);
error!("Failed parsing relative scheme url: {}", err);
None
}
}
@@ -117,10 +121,13 @@ fn try_get_url(parent: &Url, link: &str) -> Option<Url> {
match e {
url::ParseError::RelativeUrlWithoutBase => {
// Is: scheme://host:port
let origin = parent.origin().ascii_serialization();
let mut origin = parent.origin().ascii_serialization();
if !origin.ends_with('/') && !link.starts_with('/') {
origin += "/";
}
let url = origin.clone() + link;
trace!("Built `{url}` from `{origin} + {}`", link.to_string());
trace!("Built `{url}` from `{origin} + `{}`", link.to_string());
if let Ok(url) = Url::parse(&url) {
trace!("Saved relative url `{}` AS: `{}`", link, url);