extract upload router

parent 9caf7b78fe
commit ea7dee4dfd

src/main.rs | 118
--- a/src/main.rs
+++ b/src/main.rs
@@ -1,27 +1,22 @@
-use async_zip::{tokio::write::ZipFileWriter, Compression, ZipEntryBuilder};
+use tokio_util::io::ReaderStream;
 
-use futures::TryStreamExt;
-use tokio_util::{
-    compat::FuturesAsyncWriteCompatExt,
-    io::{ReaderStream, StreamReader},
-};
-
 use axum::{
     body::Body,
-    extract::{ConnectInfo, DefaultBodyLimit, Multipart, Request, State},
-    http::{HeaderMap, Response, StatusCode},
+    extract::{ConnectInfo, Request, State},
+    http::{HeaderMap, StatusCode},
     middleware::{self, Next},
     response::{Html, IntoResponse, Redirect},
-    routing::{get, post},
+    routing::get,
     Json, Router,
 };
 use axum_extra::TypedHeader;
-use tower_http::{limit::RequestBodyLimitLayer, services::ServeDir, trace::TraceLayer};
+use tower_http::{services::ServeDir, trace::TraceLayer};
 
-use sanitize_filename_reader_friendly::sanitize;
-
-use std::{io, net::SocketAddr, path::Path};
+use std::{io, net::SocketAddr};
 
+mod router {
+    pub mod upload;
+}
 mod cache;
 mod state;
 mod util;
@@ -29,9 +24,12 @@ mod views;
 
 use util::{headers::ForwardedFor, logging, ssr, sweeper};
 
+use router::*;
 use state::*;
 use views::*;
 
+use upload::get_upload_router;
+
 #[tokio::main]
 async fn main() -> io::Result<()> {
     logging::init_tracing();
@@ -43,19 +41,23 @@ async fn main() -> io::Result<()> {
 
     sweeper::spawn(state.clone());
 
+    // Records views
+    let record_router = Router::new()
+        .route("/", get(records))
+        .route("/links", get(records_links));
+
+    // Link pages
+    let link_router = Router::new()
+        .route("/:id", get(link).delete(link_delete))
+        .route("/:id/remaining", get(remaining));
+
     // Router Setup
     let app = Router::new()
         .route("/", get(welcome))
-        .route("/upload", post(upload_to_zip))
-        .route("/records", get(records))
-        .route("/records/links", get(records_links))
         .route("/download/:id", get(download))
-        .route("/link/:id", get(link).delete(link_delete))
-        .route("/link/:id/remaining", get(remaining))
-        .layer(DefaultBodyLimit::disable())
-        .layer(RequestBodyLimitLayer::new(
-            10 * 1024 * 1024 * 1024, // 10GiB
-        ))
+        .nest("/upload", get_upload_router())
+        .nest("/records", record_router)
+        .nest("/link", link_router)
         .with_state(state)
         .fallback_service(ServeDir::new("dist"))
         .layer(TraceLayer::new_for_http())
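
Why the hunk above is safe: in axum, a `.layer(...)` call wraps only the routes already registered on that particular `Router`, so moving the body-limit layers into the nested upload router (see src/router/upload.rs below) confines the 10 GiB allowance to POST /upload while every other route keeps axum's default body limit. A minimal sketch of the pattern, assuming the axum 0.6/0.7-era API that matches the `/:id` path syntax above; the closure handlers are placeholders:

use axum::{
    extract::DefaultBodyLimit,
    routing::{get, post},
    Router,
};
use tower_http::limit::RequestBodyLimitLayer;

fn app() -> Router {
    // Layers added here wrap only this sub-router's routes.
    let upload = Router::new()
        .route("/", post(|| async { "zipped" }))
        .layer(DefaultBodyLimit::disable())
        .layer(RequestBodyLimitLayer::new(10 * 1024 * 1024 * 1024)); // 10GiB

    Router::new()
        // Still subject to axum's default body limit.
        .route("/", get(|| async { "welcome" }))
        // POST /upload now reaches the relaxed-limit handler.
        .nest("/upload", upload)
}
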
@@ -180,76 +182,6 @@ async fn log_source(
     next.run(req).await
 }
 
-async fn upload_to_zip(
-    State(state): State<AppState>,
-    mut body: Multipart,
-) -> Result<Response<String>, (StatusCode, String)> {
-    tracing::debug!("{:?}", *state.records.lock().await);
-
-    let cache_name = util::get_random_name(10);
-
-    let archive_path = Path::new(".cache/serve").join(format!("{}.zip", &cache_name));
-
-    tracing::debug!("Zipping: {:?}", &archive_path);
-
-    let mut archive = tokio::fs::File::create(&archive_path)
-        .await
-        .map_err(|err| (StatusCode::INTERNAL_SERVER_ERROR, err.to_string()))?;
-    let mut writer = ZipFileWriter::with_tokio(&mut archive);
-
-    while let Some(field) = body.next_field().await.unwrap() {
-        let file_name = match field.file_name() {
-            Some(file_name) => sanitize(file_name),
-            _ => continue,
-        };
-
-        tracing::debug!("Downloading to Zip: {file_name:?}");
-
-        let stream = field;
-        let body_with_io_error = stream.map_err(io::Error::other);
-        let mut body_reader = StreamReader::new(body_with_io_error);
-
-        let builder = ZipEntryBuilder::new(file_name.into(), Compression::Deflate);
-        let mut entry_writer = writer
-            .write_entry_stream(builder)
-            .await
-            .unwrap()
-            .compat_write();
-
-        tokio::io::copy(&mut body_reader, &mut entry_writer)
-            .await
-            .map_err(|err| (StatusCode::INTERNAL_SERVER_ERROR, err.to_string()))?;
-
-        entry_writer
-            .into_inner()
-            .close()
-            .await
-            .map_err(|err| (StatusCode::INTERNAL_SERVER_ERROR, err.to_string()))?;
-    }
-
-    let mut records = state.records.lock().await;
-    let record = UploadRecord::new(archive_path);
-    records.insert(cache_name.clone(), record.clone());
-
-    cache::write_to_cache(&records)
-        .await
-        .map_err(|err| (StatusCode::INTERNAL_SERVER_ERROR, err.to_string()))?;
-
-    writer.close().await.unwrap();
-
-    let id = cache_name;
-    let response = Response::builder()
-        .status(200)
-        .header("Content-Type", "text/html")
-        .header("HX-Push-Url", format!("/link/{}", &id))
-        .body(ssr::render(|| {
-            leptos::view! { <LinkView id record /> }
-        }))
-        .unwrap();
-
-    Ok(response)
-}
-
 async fn download(
     axum::extract::Path(id): axum::extract::Path<String>,
     headers: HeaderMap,
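
The handler removed above reappears verbatim in the new module below; this commit moves code rather than changing behavior. The one piece worth seeing in isolation is the zip-streaming pattern it relies on: each entry is opened as a stream, written through a futures-to-tokio compat adapter, and closed before the next entry begins, so files are never buffered whole. A standalone sketch, assuming async_zip with its `tokio` and `deflate` features plus tokio-util's `compat` feature; the file name and payload are placeholders:

use async_zip::{base::write::ZipFileWriter, Compression, ZipEntryBuilder};
use tokio_util::compat::FuturesAsyncWriteCompatExt;

async fn write_one_entry() -> Result<(), Box<dyn std::error::Error>> {
    let mut file = tokio::fs::File::create("example.zip").await?;
    let mut writer = ZipFileWriter::with_tokio(&mut file);

    // Open the entry as a stream and adapt it from futures-io to tokio-io
    // so tokio::io::copy can drive it chunk by chunk.
    let builder = ZipEntryBuilder::new("hello.txt".to_string().into(), Compression::Deflate);
    let mut entry = writer.write_entry_stream(builder).await?.compat_write();

    tokio::io::copy(&mut &b"hello world"[..], &mut entry).await?;

    // Unwrap the compat adapter and close this entry before starting the next.
    entry.into_inner().close().await?;

    writer.close().await?;
    Ok(())
}
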
--- /dev/null
+++ b/src/router/upload.rs
@@ -0,0 +1,101 @@
+use std::path::Path;
+
+use async_zip::{base::write::ZipFileWriter, Compression, ZipEntryBuilder};
+use axum::{
+    extract::{DefaultBodyLimit, Multipart, State},
+    http::Response,
+    routing::post,
+    Router,
+};
+use futures::TryStreamExt;
+use reqwest::StatusCode;
+use sanitize_filename_reader_friendly::sanitize;
+use tokio::io;
+use tokio_util::{compat::FuturesAsyncWriteCompatExt, io::StreamReader};
+use tower_http::limit::RequestBodyLimitLayer;
+
+use crate::{
+    cache,
+    util::{self, ssr},
+    AppState, LinkView, UploadRecord,
+};
+
+pub fn get_upload_router() -> Router<AppState> {
+    // Upload needs a subrouter to increase the body limit
+    Router::new()
+        .route("/", post(upload_to_zip))
+        .layer(DefaultBodyLimit::disable())
+        .layer(RequestBodyLimitLayer::new(
+            10 * 1024 * 1024 * 1024, // 10GiB
+        ))
+}
+
+async fn upload_to_zip(
+    State(state): State<AppState>,
+    mut body: Multipart,
+) -> Result<Response<String>, (StatusCode, String)> {
+    tracing::debug!("{:?}", *state.records.lock().await);
+
+    let cache_name = util::get_random_name(10);
+
+    let archive_path = Path::new(".cache/serve").join(format!("{}.zip", &cache_name));
+
+    tracing::debug!("Zipping: {:?}", &archive_path);
+
+    let mut archive = tokio::fs::File::create(&archive_path)
+        .await
+        .map_err(|err| (StatusCode::INTERNAL_SERVER_ERROR, err.to_string()))?;
+    let mut writer = ZipFileWriter::with_tokio(&mut archive);
+
+    while let Some(field) = body.next_field().await.unwrap() {
+        let file_name = match field.file_name() {
+            Some(file_name) => sanitize(file_name),
+            _ => continue,
+        };
+
+        tracing::debug!("Downloading to Zip: {file_name:?}");
+
+        let stream = field;
+        let body_with_io_error = stream.map_err(io::Error::other);
+        let mut body_reader = StreamReader::new(body_with_io_error);
+
+        let builder = ZipEntryBuilder::new(file_name.into(), Compression::Deflate);
+        let mut entry_writer = writer
+            .write_entry_stream(builder)
+            .await
+            .unwrap()
+            .compat_write();
+
+        tokio::io::copy(&mut body_reader, &mut entry_writer)
+            .await
+            .map_err(|err| (StatusCode::INTERNAL_SERVER_ERROR, err.to_string()))?;
+
+        entry_writer
+            .into_inner()
+            .close()
+            .await
+            .map_err(|err| (StatusCode::INTERNAL_SERVER_ERROR, err.to_string()))?;
+    }
+
+    let mut records = state.records.lock().await;
+    let record = UploadRecord::new(archive_path);
+    records.insert(cache_name.clone(), record.clone());
+
+    cache::write_to_cache(&records)
+        .await
+        .map_err(|err| (StatusCode::INTERNAL_SERVER_ERROR, err.to_string()))?;
+
+    writer.close().await.unwrap();
+
+    let id = cache_name;
+    let response = Response::builder()
+        .status(200)
+        .header("Content-Type", "text/html")
+        .header("HX-Push-Url", format!("/link/{}", &id))
+        .body(ssr::render(|| {
+            leptos::view! { <LinkView id record /> }
+        }))
+        .unwrap();
+
+    Ok(response)
+}
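
For a quick end-to-end check of the extracted router, a hypothetical client exercising POST /upload. Everything here is an assumption not shown in the diff: the host and port, the `file` field name (arbitrary, since upload_to_zip only reads each field's file name), and a reqwest dependency with its `multipart` feature enabled:

use reqwest::multipart::{Form, Part};

#[tokio::main]
async fn main() -> Result<(), reqwest::Error> {
    let part = Part::bytes(b"hello".to_vec()).file_name("hello.txt");
    let form = Form::new().part("file", part);

    // "/" inside the upload sub-router is reachable at /upload after .nest().
    let resp = reqwest::Client::new()
        .post("http://localhost:3000/upload")
        .multipart(form)
        .send()
        .await?;

    // On success the handler returns HTML plus an HX-Push-Url header
    // pointing at the new /link/{id} page.
    println!("{} {:?}", resp.status(), resp.headers().get("HX-Push-Url"));
    Ok(())
}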