uhhhh some stuff prolly?

main
Zynh0722 2023-07-25 08:14:42 -07:00
parent 300c0385e8
commit 33ad2f14a7
4 changed files with 147 additions and 146 deletions

40
src/cache.rs Normal file

@@ -0,0 +1,40 @@
use crate::state::AppState;
use super::error;

use serde::Serialize;
use tokio::io::AsyncReadExt;

use std::collections::HashMap;
use std::io;

pub async fn write_to_cache<T, Y>(records: &HashMap<T, Y>) -> io::Result<()>
where
    T: Serialize,
    Y: Serialize,
{
    let mut records_cache = tokio::fs::File::create(".cache/data").await.unwrap();
    let mut buf: Vec<u8> = Vec::with_capacity(200);

    bincode::serialize_into(&mut buf, records)
        .map_err(|err| error::io_other(&err.to_string()))?;

    let bytes_written = tokio::io::copy(&mut buf.as_slice(), &mut records_cache).await?;

    tracing::debug!("state cache size: {}", bytes_written);

    Ok(())
}

pub async fn fetch_cache() -> AppState {
    let records = if let Ok(file) = tokio::fs::File::open(".cache/data").await.as_mut() {
        let mut buf: Vec<u8> = Vec::with_capacity(200);
        file.read_to_end(&mut buf).await.unwrap();

        bincode::deserialize_from(&mut buf.as_slice()).unwrap()
    } else {
        HashMap::new()
    };

    AppState::new(records)
}
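The `error::io_other` helper called above predates this commit and isn't shown in the diff; presumably it just wraps a message into an `io::Error`. A minimal sketch of that assumption, plus a hypothetical round-trip through the two functions (the `String`/`UploadRecord` key and value types match their use in `main.rs` below):

// Presumed shape of the io_other helper that cache.rs calls (not in this diff):
pub fn io_other(msg: &str) -> std::io::Error {
    std::io::Error::new(std::io::ErrorKind::Other, msg)
}

// Hypothetical round-trip: persist the records map, then restore it at startup.
// Assumes UploadRecord implements Serialize + Deserialize (it is bincode-encoded here).
async fn cache_round_trip() -> std::io::Result<()> {
    use std::collections::HashMap;
    use crate::state::{AppState, UploadRecord};

    let records: HashMap<String, UploadRecord> = HashMap::new();
    crate::cache::write_to_cache(&records).await?; // bincode-encodes into .cache/data
    let _state: AppState = crate::cache::fetch_cache().await; // reads it back, or starts empty
    Ok(())
}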

src/main.rs

@@ -1,45 +1,36 @@
-use async_zip::tokio::write::ZipFileWriter;
-use async_zip::{Compression, ZipEntryBuilder};
-use axum::body::StreamBody;
-use axum::extract::{ConnectInfo, State};
-use axum::http::{Request, StatusCode};
-use axum::middleware::{self, Next};
-use axum::response::{IntoResponse, Response};
-use axum::routing::{get, post};
-use axum::TypedHeader;
+use async_zip::{tokio::write::ZipFileWriter, Compression, ZipEntryBuilder};
 use axum::{
-    extract::{DefaultBodyLimit, Multipart},
-    response::Redirect,
-    Router,
+    body::StreamBody,
+    extract::{ConnectInfo, DefaultBodyLimit, Multipart, State},
+    http::{Request, StatusCode},
+    middleware::{self, Next},
+    response::{IntoResponse, Redirect, Response},
+    routing::{get, post},
+    Router, TypedHeader,
 };
 use futures::TryStreamExt;
-use headers::{Header, HeaderName, HeaderValue};
-use rand::distributions::{Alphanumeric, DistString};
-use rand::rngs::SmallRng;
-use rand::SeedableRng;
+use nyazoom_headers::ForwardedFor;
 use sanitize_filename_reader_friendly::sanitize;
-use serde::Serialize;
-use tokio::io::AsyncReadExt;
-use tokio_util::compat::FuturesAsyncWriteCompatExt;
-
-use std::collections::HashMap;
-use std::io;
-use std::net::SocketAddr;
-use std::path::Path;
-use tokio_util::io::{ReaderStream, StreamReader};
+use std::{io, net::SocketAddr, path::Path};
+use tokio_util::{
+    compat::FuturesAsyncWriteCompatExt,
+    io::{ReaderStream, StreamReader},
+};
 use tower_http::{limit::RequestBodyLimitLayer, services::ServeDir, trace::TraceLayer};
 use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt};

+mod cache;
+mod nyazoom_headers;
 mod state;
+mod util;

 use state::{AppState, UploadRecord};
@@ -63,9 +54,9 @@ async fn main() -> io::Result<()> {
         .init();

     // uses create_dir_all to create both .cache and serve inside it in one go
-    make_dir(".cache/serve").await?;
+    util::make_dir(".cache/serve").await?;

-    let state = fetch_cache().await;
+    let state = cache::fetch_cache().await;

     // Router Setup
     let app = Router::new()
@@ -91,23 +82,13 @@ async fn main() -> io::Result<()> {
     Ok(())
 }

-// async fn log_source<B>(
-//     ConnectInfo(addr): ConnectInfo<SocketAddr>,
-//     req: Request<B>,
-//     next: Next<B>,
-// ) -> Response {
-//     tracing::info!("{}", addr);
-//
-//     next.run(req).await
-// }
-
 async fn log_source<B>(
     ConnectInfo(addr): ConnectInfo<SocketAddr>,
-    TypedHeader(ForwardedFor(forwarded_for)): TypedHeader<ForwardedFor>,
+    forwarded_for: Option<TypedHeader<ForwardedFor>>,
     req: Request<B>,
     next: Next<B>,
 ) -> Response {
-    tracing::info!("{} : {}", addr, forwarded_for);
+    tracing::info!("{} : {:?}", addr, forwarded_for);

     next.run(req).await
 }
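Wrapping the extractor in `Option` makes `log_source` tolerant of requests without an `x-forwarded-for` header: they now log as `None` instead of being rejected before the middleware runs. A sketch of how this middleware would be wired up (router contents and bind address are assumptions, axum 0.6-era API; `ConnectInfo<SocketAddr>` also requires building the service with connect info):

// Hypothetical wiring for the middleware above:
async fn serve_with_logging() {
    let app = Router::new().layer(middleware::from_fn(log_source));

    axum::Server::bind(&"0.0.0.0:3000".parse().unwrap())
        // ConnectInfo<SocketAddr> only resolves when the server is built this way
        .serve(app.into_make_service_with_connect_info::<SocketAddr>())
        .await
        .unwrap();
}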
@@ -118,7 +99,7 @@ async fn upload_to_zip(
 ) -> Result<Redirect, (StatusCode, String)> {
     tracing::debug!("{:?}", *state.records.lock().await);

-    let cache_name = get_random_name(10);
+    let cache_name = util::get_random_name(10);

     let archive_path = Path::new(".cache/serve").join(&format!("{}.zip", &cache_name));
@@ -162,7 +143,7 @@ async fn upload_to_zip(
     let mut records = state.records.lock().await;
     records.insert(cache_name.clone(), UploadRecord::new(archive_path));

-    write_to_cache(&records)
+    cache::write_to_cache(&records)
         .await
         .map_err(|err| (StatusCode::INTERNAL_SERVER_ERROR, err.to_string()))?;
@@ -189,113 +170,11 @@ async fn download(
             .unwrap()
             .into_response());
         } else {
-            let _ = tokio::fs::remove_file(&record.file);
+            let _ = tokio::fs::remove_file(&record.file).await;
             records.remove(&id);
-            write_to_cache(&records).await.unwrap();
+            cache::write_to_cache(&records).await.unwrap();
         }
     }

     Ok(Redirect::to("/404.html").into_response())
 }
-
-#[inline]
-async fn make_dir<T>(name: T) -> io::Result<()>
-where
-    T: AsRef<Path>,
-{
-    tokio::fs::create_dir_all(name)
-        .await
-        .or_else(|err| match err.kind() {
-            io::ErrorKind::AlreadyExists => Ok(()),
-            _ => Err(err),
-        })
-}
-
-#[inline]
-fn get_random_name(len: usize) -> String {
-    let mut rng = SmallRng::from_entropy();
-    Alphanumeric.sample_string(&mut rng, len)
-}
-
-async fn write_to_cache<T, Y>(records: &HashMap<T, Y>) -> io::Result<()>
-where
-    T: Serialize,
-    Y: Serialize,
-{
-    let mut records_cache = tokio::fs::File::create(".cache/data").await.unwrap();
-    let mut buf: Vec<u8> = Vec::with_capacity(200);
-
-    bincode::serialize_into(&mut buf, &*records)
-        .map_err(|err| error::io_other(&err.to_string()))?;
-
-    let bytes_written = tokio::io::copy(&mut buf.as_slice(), &mut records_cache).await?;
-
-    tracing::debug!("state cache size: {}", bytes_written);
-
-    Ok(())
-}
-
-async fn fetch_cache() -> AppState {
-    let records = if let Ok(file) = tokio::fs::File::open(".cache/data").await.as_mut() {
-        let mut buf: Vec<u8> = Vec::with_capacity(200);
-        file.read_to_end(&mut buf).await.unwrap();
-
-        bincode::deserialize_from(&mut buf.as_slice()).unwrap()
-    } else {
-        HashMap::new()
-    };
-
-    AppState::new(records)
-}
-
-#[allow(dead_code)]
-static UNITS: [&str; 6] = ["KiB", "MiB", "GiB", "TiB", "PiB", "EiB"];
-
-// This function is actually rather interesting to me, I understand that rust is
-// very powerful, and its very safe, but i find it rather amusing that the [] operator
-// doesn't check bounds, meaning it can panic at runtime. Usually rust is very
-// very careful about possible panics
-//
-// although this function shouldn't be able to panic at runtime due to known bounds
-// being listened to
-#[inline]
-fn _bytes_to_human_readable(bytes: u64) -> String {
-    let mut running = bytes as f64;
-    let mut count = 0;
-    while running > 1024.0 && count <= 6 {
-        running /= 1024.0;
-        count += 1;
-    }
-
-    format!("{:.2} {}", running, UNITS[count - 1])
-}
-
-struct ForwardedFor(String);
-
-static FF_TEXT: &str = "x-forwarded-for";
-static FF_NAME: HeaderName = HeaderName::from_static(FF_TEXT);
-
-impl Header for ForwardedFor {
-    fn name() -> &'static HeaderName {
-        &FF_NAME
-    }
-
-    fn decode<'i, I>(values: &mut I) -> Result<Self, headers::Error>
-    where
-        Self: Sized,
-        I: Iterator<Item = &'i headers::HeaderValue>,
-    {
-        let value = values
-            .next()
-            .ok_or_else(headers::Error::invalid)?
-            .to_str()
-            .map_err(|_| headers::Error::invalid())?
-            .to_owned();
-
-        Ok(ForwardedFor(value))
-    }
-
-    fn encode<E: Extend<headers::HeaderValue>>(&self, values: &mut E) {
-        values.extend(std::iter::once(HeaderValue::from_str(&self.0).unwrap()));
-    }
-}

33
src/nyazoom_headers.rs Normal file

@@ -0,0 +1,33 @@
use headers::{self, Header, HeaderName, HeaderValue};

#[derive(Debug)]
pub struct ForwardedFor(String);

pub static FF_TEXT: &str = "x-forwarded-for";
pub static FF_NAME: HeaderName = HeaderName::from_static(FF_TEXT);

impl Header for ForwardedFor {
    fn name() -> &'static HeaderName {
        &FF_NAME
    }

    fn decode<'i, I>(values: &mut I) -> Result<Self, headers::Error>
    where
        Self: Sized,
        I: Iterator<Item = &'i headers::HeaderValue>,
    {
        let value = values
            .next()
            .ok_or_else(headers::Error::invalid)?
            .to_str()
            .map_err(|_| headers::Error::invalid())?
            .to_owned();

        Ok(ForwardedFor(value))
    }

    fn encode<E: Extend<headers::HeaderValue>>(&self, values: &mut E) {
        values.extend(std::iter::once(HeaderValue::from_str(&self.0).unwrap()));
    }
}
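A hypothetical test sketch of the decode path, using the `headers` crate's `HeaderMapExt` (the proxy-chain value is made up):

#[cfg(test)]
mod tests {
    use super::ForwardedFor;
    use headers::{HeaderMap, HeaderMapExt, HeaderValue};

    #[test]
    fn decodes_first_value_verbatim() {
        let mut map = HeaderMap::new();
        map.insert("x-forwarded-for", HeaderValue::from_static("203.0.113.7, 10.0.0.1"));

        // decode() takes the first header value as-is, proxy chain included
        let ForwardedFor(value) = map.typed_get().expect("header should decode");
        assert_eq!(value, "203.0.113.7, 10.0.0.1");
    }
}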

49
src/util.rs Normal file

@@ -0,0 +1,49 @@
use rand::{
    distributions::{Alphanumeric, DistString},
    rngs::SmallRng,
    SeedableRng,
};

use std::{io, path::Path};

#[inline]
pub async fn make_dir<T>(name: T) -> io::Result<()>
where
    T: AsRef<Path>,
{
    tokio::fs::create_dir_all(name)
        .await
        .or_else(|err| match err.kind() {
            io::ErrorKind::AlreadyExists => Ok(()),
            _ => Err(err),
        })
}

#[inline]
pub fn get_random_name(len: usize) -> String {
    let mut rng = SmallRng::from_entropy();
    Alphanumeric.sample_string(&mut rng, len)
}

#[allow(dead_code)]
pub static UNITS: [&str; 6] = ["KiB", "MiB", "GiB", "TiB", "PiB", "EiB"];

// This function is actually rather interesting to me. I understand that Rust is
// very powerful and very safe, but I find it rather amusing that the [] operator
// isn't bounds-checked at compile time, meaning it can still panic at runtime.
// Usually Rust is very careful about possible panics.
//
// Although this function shouldn't be able to panic at runtime, as long as the
// known bounds are respected.
#[inline]
pub fn _bytes_to_human_readable(bytes: u64) -> String {
    let mut running = bytes as f64;
    let mut count = 0;

    while running > 1024.0 && count <= 6 {
        running /= 1024.0;
        count += 1;
    }

    format!("{:.2} {}", running, UNITS[count - 1])
}
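One caveat the comment above glosses over: for inputs of 1024 bytes or fewer the loop never runs, `count` stays 0, and `count - 1` underflows `usize`, so the indexing does panic. A guarded variant as a sketch (adds a plain "B" unit so small values have an index to land on):

// Sketch of a panic-free variant: the index always stays in 0..UNITS.len(),
// and values under 1024 fall through to a plain byte count.
pub fn bytes_to_human_readable(bytes: u64) -> String {
    const UNITS: [&str; 7] = ["B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB"];

    let mut running = bytes as f64;
    let mut count = 0;
    while running >= 1024.0 && count < UNITS.len() - 1 {
        running /= 1024.0;
        count += 1;
    }

    format!("{:.2} {}", running, UNITS[count])
}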