checkpoint - onto profiling

Rushmore75 2025-03-18 10:53:06 -06:00
parent 82929fd0fc
commit b7540a4680
6 changed files with 941 additions and 552 deletions

Cargo.lock (generated, 1210 lines changed)
File diff suppressed because it is too large

Cargo.toml

@@ -8,10 +8,10 @@ base64 = "0.22.1"
 html5ever = "0.29"
 # minio = "0.1.0"
 minio = {git="https://github.com/minio/minio-rs.git", rev = "c28f576"}
-reqwest = "0.12"
+reqwest = { version = "0.12", features = ["gzip"] }
 serde = { version = "1.0", features = ["derive"] }
-surrealdb = "2.1"
+surrealdb = "2.2"
 tokio = { version="1.41.0", features = ["full"] }
 tracing = "0.1"
-tracing-subscriber = { version = "0.3", features = ["env-filter"] }
+tracing-subscriber = { version = "0.3", features = ["env-filter", "local-time"] }
 url = { version = "2.5", features = ["serde"] }
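For context on the two new features: "gzip" lets the reqwest client negotiate and transparently decompress gzip-encoded responses, and "local-time" lets the tracing subscriber stamp events in local time rather than UTC. A minimal sketch of both together, assuming these same crate versions (not part of the commit):

use tracing_subscriber::{fmt::time::LocalTime, EnvFilter};

#[tokio::main]
async fn main() -> Result<(), reqwest::Error> {
    // Local timestamps need the "local-time" feature of tracing-subscriber.
    tracing_subscriber::fmt()
        .with_env_filter(EnvFilter::from_default_env())
        .with_timer(LocalTime::rfc_3339())
        .init();

    // With the "gzip" feature, .gzip(true) adds "Accept-Encoding: gzip"
    // to requests and decompresses the response body automatically.
    let client = reqwest::Client::builder().gzip(true).build()?;
    let body = client.get("https://example.com/").send().await?.text().await?;
    tracing::info!("fetched {} bytes", body.len());
    Ok(())
}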

src/db.rs

@@ -1,9 +1,6 @@
 use serde::{Deserialize, Serialize};
 use surrealdb::{
-    engine::remote::ws::{Client, Ws},
-    opt::auth::Root,
-    sql::Thing,
-    Response, Surreal,
+    engine::remote::ws::{Client, Ws}, error::Db, opt::auth::Root, sql::Thing, Response, Surreal
 };
 use tracing::{error, instrument, trace, warn};
 use url::Url;
@@ -82,15 +79,15 @@ impl Website {
     }
 
     #[instrument(skip_all)]
-    pub async fn store(&mut self, db: &Surreal<Client>) -> Option<Thing> {
+    pub async fn store(&self, db: &Surreal<Client>) -> Option<Thing> {
         // check if it's been gone thru before
         let mut response = db
             .query("SELECT * FROM ONLY website WHERE site = $site LIMIT 1")
             .bind(("site", self.site.to_string()))
             .await
-            .unwrap();
-        if let Some(old) = response.take::<Option<Website>>(0).unwrap() {
+            .expect("Failed to check surreal for duplicates!");
+        if let Some(old) = response.take::<Option<Website>>(0).expect("Failed to read reponse from surreal for duplicates.") {
             // site exists already
             if let Some(id) = old.id {
                 // make sure to preserve the "crawled status"
@@ -106,7 +103,18 @@ impl Website {
                 }
             }
             Err(e) => {
-                error!("{}", e);
+                match e {
+                    surrealdb::Error::Db(error) => {
+                        match error {
+                            Db::QueryCancelled => todo!(),
+                            Db::QueryNotExecuted => todo!(),
+                            Db::QueryNotExecutedDetail { message } => todo!(),
+                            _=>{},
+                        }
+                    },
+                    _=>{},
+                }
+                // error!("{}", e);
             }
         };
     }
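The new match on the store() error path still bottoms out in todo!() arms. One hypothetical way they could be filled in later (function name and log levels are illustrative, not from the commit; the variants themselves come from the diff above):

use surrealdb::error::Db;
use tracing::{error, warn};

// Hypothetical replacement for the todo!() arms above.
fn handle_store_error(e: surrealdb::Error) {
    match e {
        surrealdb::Error::Db(error) => match error {
            // Cancelled / unexecuted queries are recoverable; log and move on.
            Db::QueryCancelled => warn!("query was cancelled"),
            Db::QueryNotExecuted => warn!("query was not executed"),
            Db::QueryNotExecutedDetail { message } => warn!("query not executed: {message}"),
            other => error!("{other}"),
        },
        other => error!("{other}"),
    }
}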

src/main.rs

@@ -1,3 +1,6 @@
+#![feature(type_alias_impl_trait)]
+#![feature(const_async_blocks)]
+
 extern crate html5ever;
 
 use std::time::Instant;
@@ -5,12 +8,13 @@ use std::time::Instant;
 use db::{connect, Website};
 use s3::S3;
 use surrealdb::{engine::remote::ws::Client, Surreal};
+use tokio::task::JoinSet;
 use tracing::{debug, info, instrument, trace, trace_span};
-use tracing_subscriber::EnvFilter;
+use tracing_subscriber::{fmt::time::LocalTime, EnvFilter};
 
 mod db;
-mod s3;
 mod parser;
+mod s3;
 
 struct Config<'a> {
     surreal_ns: &'a str,
@@ -31,52 +35,62 @@ async fn main() {
     tracing_subscriber::fmt()
         .with_env_filter(EnvFilter::from_default_env())
-        .with_line_number(false)
-        .without_time()
+        .with_line_number(true)
+        .with_file(true)
+        .with_timer(LocalTime::rfc_3339())
         .init();
     debug!("Starting...");
 
+    // Would probably take these in as parameters from a cli
+    let starting_url = "https://en.wikipedia.org/";
+    // When getting uncrawled pages, name must contain this variable. "" will effectively get ignored.
+    let crawl_filter = "en.wikipedia.org/";
+    let budget = 50;
+    let mut crawled = 0;
+
     let config = Config {
         surreal_url: "localhost:8000",
         surreal_username: "root",
         surreal_password: "root",
         surreal_ns: "test",
-        surreal_db: "b64v1",
-        s3_bucket: "b64v1",
+        surreal_db: "v1.11.2",
+        s3_bucket: "v1.11.2",
         s3_url: "http://localhost:9000",
         s3_access_key: "8UO76z8wCs9DnpxSbQUY",
         s3_secret_key: "xwKVMpf2jzgprsdo85Dvo74UmO84y0aRrAUorYY5",
     };
 
-    // Would probably take these in as parameters from a cli
-    let starting_url = "https://en.wikipedia.org/";
-    // When getting uncrawled pages, name must contain this variable. "" will effectively get ignored.
-    let crawl_filter = "wikipedia.org/";
-    let budget = 50;
-    let mut crawled = 0;
-
-    let s3 = S3::connect(&config).await.expect("Failed to connect to minio, aborting.");
-    let db = connect(&config).await.expect("Failed to connect to surreal, aborting.");
+    let db = connect(&config)
+        .await
+        .expect("Failed to connect to surreal, aborting.");
+    let s3 = S3::connect(&config)
+        .await
+        .expect("Failed to connect to minio, aborting.");
 
     let reqwest = reqwest::Client::builder()
         // .use_rustls_tls()
+        .gzip(true)
         .build()
-        .unwrap();
+        .expect("Failed to build reqwest client.");
 
     // Kick off the whole machine - This Website object doesn't matter, it's just to allow for
     // get() to work.
     let span = trace_span!("Pre-Loop");
     let pre_loop_span = span.enter();
     // Download the site
-    let mut site = Website::new(&starting_url, false);
-    get(&mut site, &db, &reqwest, &s3, &mut crawled).await;
+    let site = Website::new(&starting_url, false);
+    get(site, db.clone(), reqwest.clone(), s3.clone()).await;
     drop(pre_loop_span);
 
     let span = trace_span!("Loop");
     let span = span.enter();
     while crawled < budget {
-        let get_num = if budget - crawled < 100 { budget - crawled } else { 100 };
+        let get_num = if budget - crawled < 100 {
+            budget - crawled
+        } else {
+            100
+        };
 
         let uncrawled = get_uncrawled_links(&db, get_num, crawl_filter.to_string()).await;
         if uncrawled.len() == 0 {
@@ -88,11 +102,19 @@ async fn main() {
         let span = trace_span!("Crawling");
         let _ = span.enter();
 
-        for mut site in uncrawled {
-            get(&mut site, &db, &reqwest, &s3, &mut crawled).await;
-
-            let percent = format!("{:.2}%", (crawled as f32 / budget as f32) * 100f32);
-            info!("Crawled {crawled} out of {budget} pages. ({percent})");
+        {
+            let mut futures = JoinSet::new();
+            for site in uncrawled {
+                futures.spawn(get(site, db.clone(), reqwest.clone(), s3.clone()));
+                // technically the site hasn't be crawled *yet*, but the future
+                // where it is crawled has been set up.
+                crawled += 1;
+                // let percent = format!("{:.2}%", (crawled as f32 / budget as f32) * 100f32);
+                // info!("Crawled {crawled} out of {budget} pages. ({percent})");
+            }
+            debug!("Joining {} futures...", futures.len());
+            // join all the gets together
+            let _ = futures.join_all().await;
         }
     }
     drop(span);
@@ -103,25 +125,19 @@ async fn main() {
 
 #[instrument(skip_all)]
 /// Downloads and crawls and stores a webpage.
-async fn get(
-    site: &mut Website,
-    db: &Surreal<Client>,
-    reqwest: &reqwest::Client,
-    s3: &S3,
-    count: &mut usize,
-) {
+/// It is acceptable to clone `db`, `reqwest`, and `s3` because they all use `Arc`s internally. - Noted by Oliver
+async fn get(mut site: Website, db: Surreal<Client>, reqwest: reqwest::Client, s3: S3) {
     trace!("Get: {}", site.to_string());
     let timer = Timer::start("Got page");
 
     if let Ok(response) = reqwest.get(site.to_string()).send().await {
         timer.stop();
 
         // Get body
-        let data = response.text().await.unwrap();
+        let data = response.text().await.expect("Failed to read http response's body!");
         // Store document
         s3.store(&data, &site.site).await;
         // Parse document and store relationships
-        parser::parse(db, site, data).await;
-        *count += 1;
+        parser::parse(&db, &mut site, &data).await;
         return;
     }
     trace!("Failed to get: {}", site.to_string());
@@ -129,15 +145,19 @@ async fn get(
 
 /// Returns uncrawled links
 #[instrument(skip(db))]
-async fn get_uncrawled_links(db: &Surreal<Client>, mut count: usize, param: String) -> Vec<Website> {
+async fn get_uncrawled_links(
+    db: &Surreal<Client>,
+    mut count: usize,
+    filter: String,
+) -> Vec<Website> {
     if count > 100 {
         count = 100
     }
-    trace!("Getting uncrawled links");
+    debug!("Getting uncrawled links");
 
     let mut response = db
         .query("SELECT * FROM website WHERE crawled = false AND site CONTAINS type::string($format) LIMIT $count;")
-        .bind(("format", param))
+        .bind(("format", filter))
         .bind(("count", count))
         .await
         .expect("Hard-coded query failed..?");
@@ -162,7 +182,7 @@ impl<'a> Timer<'a> {
     pub fn stop(&self) -> f64 {
         let dif = self.start.elapsed().as_micros();
         let ms = dif as f64 / 1000.;
-        debug!("{}", format!("{} in {:.3}ms", self.msg, ms));
+        trace!("{}", format!("{} in {:.3}ms", self.msg, ms));
         ms
     }
 }
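Note the comment in the loop above: a page is now counted as crawled when its future is spawned, not when it completes. The JoinSet pattern in isolation, with a stand-in task in place of get() (a sketch; join_all requires tokio 1.40+, which the pinned 1.41 satisfies):

use tokio::task::JoinSet;

#[tokio::main]
async fn main() {
    let mut futures = JoinSet::new();
    for n in 0..10u32 {
        // spawn() starts the task on the runtime immediately;
        // the JoinSet only tracks its completion.
        futures.spawn(async move { n * 2 });
    }
    println!("Joining {} futures...", futures.len());
    // join_all() awaits every task and collects the results into one Vec.
    let results: Vec<u32> = futures.join_all().await;
    assert_eq!(results.len(), 10);
}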

src/parser.rs

@@ -12,12 +12,7 @@ use tracing::instrument;
 use crate::db::Website;
 use crate::Timer;
 
-#[derive(Clone)]
-struct LinkParser<'a> {
-    site: &'a Website,
-}
-
-impl TokenSink for LinkParser<'_> {
+impl TokenSink for Website {
     type Handle = Vec<Website>;
 
     fn process_token(&self, token: Token, _line_number: u64) -> TokenSinkResult<Self::Handle> {
@@ -40,20 +35,20 @@ impl TokenSink for LinkParser<'_> {
                         if attr_name == "src" || attr_name == "href" || attr_name == "data"
                         {
                             // Get clone of the current site object
-                            let mut web = self.site.clone();
+                            let mut web = self.clone();
 
                             // Set url
                             let mut url = web.site;
                             url.set_fragment(None); // removes #xyz
-                            let joined = url.join(&attr.value).unwrap();
+                            let joined = url.join(&attr.value).expect("Failed to join url during parsing!");
                             web.site = joined;
 
                             web.crawled = false;
 
                             links.push(web);
                         }
                     }
                     return TokenSinkResult::Script(links);
                 }
                 local_name!("button") | local_name!("meta") | local_name!("iframe") => {
@@ -70,31 +65,49 @@ impl TokenSink for LinkParser<'_> {
 }
 
 #[instrument(skip_all)]
-pub async fn parse(db: &Surreal<Client>, site: &mut Website, data: String) {
+pub async fn parse(db: &Surreal<Client>, site: &mut Website, data: &str) {
+    // update self in db
     site.set_crawled();
     site.store(db).await;
 
-    let sink = LinkParser { site };
-    let chunk = Tendril::from_str(&data).unwrap();
-    let mut input = BufferQueue::default();
-    input.push_back(chunk.try_reinterpret::<fmt::UTF8>().unwrap());
-
-    let token = Tokenizer::new(sink.clone(), TokenizerOpts::default());
-
-    let t = Timer::start("Stored pages");
-    let mut links_to = Vec::new();
-    while !input.is_empty() {
-        if let TokenizerResult::Script(s) = token.feed(&mut input) {
-            for mut web in s {
-                if let Some(id) = web.store(db).await {
-                    links_to.push(id);
-                }
-            }
-        }
-    }
-    drop(t);
-    sink.site.links_to(links_to, db).await;
-
-    assert!(input.is_empty());
-    token.end();
+    // prep work
+    let mut other_sites = Vec::new();
+    { // using blocks to prevent compiler's async worries
+        let _t = Timer::start("Parsed page");
+        // change data into something that can be tokenized
+        let chunk = Tendril::from_str(&data).expect("Failed to parse string into Tendril!");
+        // create buffer of tokens and push our input into it
+        let mut token_buffer = BufferQueue::default();
+        token_buffer.push_back(chunk.try_reinterpret::<fmt::UTF8>().expect("Failed to reinterprt chunk!"));
+        // create the tokenizer
+        let tokenizer = Tokenizer::new(site.clone(), TokenizerOpts::default());
+        // go thru buffer
+        while let TokenizerResult::Script(sites) = tokenizer.feed(&mut token_buffer) {
+            other_sites.push(sites);
+        }
+        assert!(token_buffer.is_empty());
+        tokenizer.end();
+    }
+
+    {
+        let t = Timer::start("Stored pages");
+        let mut links_to = Vec::new();
+        // this is a 2d vec accidentally
+        for a in other_sites {
+            for b in a {
+                // TODO this can become a JoinSet later
+                let other = b.store(db).await;
+                if let Some(o) = other {
+                    links_to.push(o);
+                }
+            }
+        }
+        site.links_to(links_to, db).await;
+        drop(t);
+    }
 }
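The loop above is the standard html5ever drain pattern: keep calling feed() while the sink reports Script results, then assert the buffer is empty and call end(). A stripped-down sink showing the same shape (a sketch against the html5ever 0.29 API this commit builds with; HrefSink and main are illustrative, not from the commit):

use std::str::FromStr;

use html5ever::tendril::{fmt, Tendril};
use html5ever::tokenizer::{
    BufferQueue, TagKind, Token, TokenSink, TokenSinkResult, Tokenizer, TokenizerOpts,
    TokenizerResult,
};

struct HrefSink;

impl TokenSink for HrefSink {
    type Handle = Vec<String>;

    fn process_token(&self, token: Token, _line_number: u64) -> TokenSinkResult<Self::Handle> {
        if let Token::TagToken(tag) = token {
            if tag.kind == TagKind::StartTag {
                let hrefs: Vec<String> = tag
                    .attrs
                    .iter()
                    .filter(|a| &*a.name.local == "href")
                    .map(|a| a.value.to_string())
                    .collect();
                if !hrefs.is_empty() {
                    // Script(...) is what feed() hands back to the caller.
                    return TokenSinkResult::Script(hrefs);
                }
            }
        }
        TokenSinkResult::Continue
    }
}

fn main() {
    let chunk = Tendril::from_str(r#"<a href="https://example.com/">x</a>"#).unwrap();
    let mut buffer = BufferQueue::default();
    buffer.push_back(chunk.try_reinterpret::<fmt::UTF8>().unwrap());

    let tokenizer = Tokenizer::new(HrefSink, TokenizerOpts::default());
    // Same drain pattern as parse() above.
    while let TokenizerResult::Script(links) = tokenizer.feed(&mut buffer) {
        println!("{links:?}");
    }
    assert!(buffer.is_empty());
    tokenizer.end();
}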

src/s3.rs

@@ -1,11 +1,20 @@
+use base64::{alphabet, engine::{self, general_purpose}, Engine};
 use minio::s3::{
-    args::{BucketExistsArgs, MakeBucketArgs}, client::ClientBuilder, creds::StaticProvider, error::Error, http::BaseUrl, types::S3Api, Client
+    args::{BucketExistsArgs, MakeBucketArgs},
+    client::ClientBuilder,
+    creds::StaticProvider,
+    error::Error,
+    http::BaseUrl,
+    Client,
 };
-use tracing::{instrument, trace};
+use tracing::{instrument, trace, warn};
 use url::Url;
 
 use crate::Config;
 
+const CUSTOM_ENGINE: engine::GeneralPurpose = engine::GeneralPurpose::new(&alphabet::URL_SAFE, general_purpose::NO_PAD);
+
+#[derive(Clone)]
 pub struct S3 {
     bucket_name: String,
     client: Client,
@@ -14,7 +23,10 @@ pub struct S3 {
 impl S3 {
     #[instrument(skip_all, name = "S3")]
     pub async fn connect(config: &Config<'_>) -> Result<Self, Error> {
-        let base_url = config.s3_url.parse::<BaseUrl>().unwrap();
+        let base_url = config
+            .s3_url
+            .parse::<BaseUrl>()
+            .expect("Failed to parse url into BaseUrl");
 
         let static_provider =
             StaticProvider::new(&config.s3_access_key, &config.s3_secret_key, None);
@@ -25,17 +37,22 @@ impl S3 {
         trace!("Checking bucket...");
         let exists = client
-            .bucket_exists(&BucketExistsArgs::new(&config.s3_bucket).unwrap())
+            .bucket_exists(
+                &BucketExistsArgs::new(&config.s3_bucket)
+                    .expect("Failed to check if bucket exists"),
+            )
             .await?;
 
         if !exists {
             trace!("Creating bucket...");
             client
-                .make_bucket(&MakeBucketArgs::new(&config.s3_bucket).unwrap())
+                .make_bucket(
+                    &MakeBucketArgs::new(&config.s3_bucket).expect("Failed to create bucket!"),
+                )
                .await?;
         }
 
-        trace!("Connection successfull");
+        trace!("Connection successful");
 
         Ok(Self {
             bucket_name: config.s3_bucket.to_owned(),
@@ -44,38 +61,37 @@ impl S3 {
     }
 
     #[instrument(skip_all)]
-    pub async fn store(&self, data: &str, name: &Url) {
-        if let Some(domain) = name.to_string().split('#').collect::<Vec<&str>>().get(0) {
-            use base64::prelude::*;
-            // FIXME can still get unsupported characters, _ I think
-            let filename = BASE64_URL_SAFE.encode(domain);
-
-            trace!("Filename: {filename} from {domain}");
-
-            let _ = &self
-                .client
-                .put_object_content(&self.bucket_name, &filename, data.to_owned())
-                .send()
-                .await
-                .unwrap();
-        }
-    }
-
-    pub async fn _get(&self, name: &Url) -> Option<String> {
-        if let Some(domain) = name.domain() {
-            let filename = domain.to_string() + name.path();
-
-            let data = self
-                .client
-                .get_object(&self.bucket_name, &filename)
-                .send()
-                .await
-                .unwrap();
-
-            if let Ok(segments )= data.content.to_segmented_bytes().await {
-                return Some(segments.to_bytes().iter().map(|c| *c as char).collect::<String>())
-            }
+    pub async fn store(&self, data: &str, url: &Url) {
+        if let Some(domain) = url.domain() {
+            let filename = domain.to_owned() + url.path();
+
+            trace!("Created filename: {filename} from raw: {}", url.to_string());
+
+            let _ = match &self
+                .client
+                .put_object_content(&self.bucket_name, &filename, data.to_owned())
+                .send()
+                .await {
+                    Ok(_) => {},
+                    Err(err) => {
+                        match err {
+                            Error::InvalidObjectName(_) => {
+                                warn!("Tried storing invalid object name, retrying with Base64 encoding. Last try.");
+
+                                let filename: String = domain.to_owned() + &CUSTOM_ENGINE.encode(url.path());
+
+                                let _ = &self
+                                    .client
+                                    .put_object_content(&self.bucket_name, &filename, data.to_owned())
+                                    .send()
+                                    .await
+                                    .unwrap();
+                            },
+                            _ => {},
+                        }
+                    },
+                };
         }
-        None
     }
 }
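The retry path swaps the raw URL path for a URL-safe Base64 rendering of it. What that engine produces, in isolation (a sketch assuming base64 0.22 as pinned in Cargo.toml; the sample path is illustrative):

use base64::{alphabet, engine::{self, general_purpose}, Engine};

// Same engine as above: URL_SAFE alphabet ('-' and '_' instead of '+' and '/')
// and no '=' padding, so the output works as an object key.
const CUSTOM_ENGINE: engine::GeneralPurpose =
    engine::GeneralPurpose::new(&alphabet::URL_SAFE, general_purpose::NO_PAD);

fn main() {
    let path = "/wiki/Rust_(programming_language)?action=edit";
    let encoded = CUSTOM_ENGINE.encode(path);
    // The encoding round-trips losslessly, unlike stripping unsupported characters.
    let decoded = CUSTOM_ENGINE.decode(&encoded).unwrap();
    assert_eq!(decoded, path.as_bytes());
    println!("{path} -> {encoded}");
}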