From 5d577df56a42347b42ce74173d25221a29656118 Mon Sep 17 00:00:00 2001
From: Zynh Ludwig
Date: Sat, 16 Nov 2024 05:24:38 -0800
Subject: [PATCH] feat: upload sql error "handling" and reorder

---
 src/router/upload.rs | 25 ++++++++++---------------
 1 file changed, 10 insertions(+), 15 deletions(-)

diff --git a/src/router/upload.rs b/src/router/upload.rs
index 03f61e7..479673e 100644
--- a/src/router/upload.rs
+++ b/src/router/upload.rs
@@ -13,7 +13,6 @@ use sanitize_filename_reader_friendly::sanitize;
 use tokio::io;
 use tokio_util::{compat::FuturesAsyncWriteCompatExt, io::StreamReader};
 use tower_http::limit::RequestBodyLimitLayer;
-use tracing::debug;
 
 use crate::{askama::DownloadLinkFragment, cache, util, AppState, UploadRecord};
 
@@ -77,26 +76,22 @@ async fn upload_to_zip(
     let record = UploadRecord::new(archive_path);
     let mut records = state.records.lock().await;
 
-    {
-        let mut conn = state.pool.acquire().await.unwrap();
-        let path = record.file.clone().into_os_string().into_string().unwrap();
-
-        let id = sqlx::query_file!("queries/records/new.sql", path, 5)
-            .execute(&mut *conn)
-            .await
-            .unwrap()
-            .last_insert_rowid();
-
-        // TODO: Looks like I actually gotta store cache_name lmfao
-        debug!("Saved Record {id}");
-    }
-
     records.insert(cache_name.clone(), record.clone());
     let records_cache = records.clone();
 
     // Manually drop the records mutex guard
     drop(records);
 
+    {
+        let mut conn = state.pool.acquire().await.unwrap();
+        let cache_name = cache_name.clone();
+
+        sqlx::query_file!("queries/records/new.sql", cache_name, 5)
+            .execute(&mut *conn)
+            .await
+            .map_err(|err| (StatusCode::INTERNAL_SERVER_ERROR, err.to_string()))?;
+    }
+
     cache::write_to_cache(&records_cache)
         .await
         .map_err(|err| (StatusCode::INTERNAL_SERVER_ERROR, err.to_string()))?;
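
Not part of the patch: a minimal sketch, assuming axum + sqlx on SQLite, of the
error-propagation pattern the new block adopts -- mapping the sqlx error into an
(StatusCode, String) pair so the ? operator can bubble it out of the handler as a
plain-text 500 response. The insert_record helper, table name, and column names
below are hypothetical; the project itself runs queries/records/new.sql through
sqlx::query_file!.

use axum::http::StatusCode;
use sqlx::SqlitePool;

// Hypothetical helper: acquire a connection and run an insert, converting any
// sqlx error into an (StatusCode, String) response instead of unwrapping.
async fn insert_record(
    pool: &SqlitePool,
    cache_name: &str,
    max_downloads: i64,
) -> Result<(), (StatusCode, String)> {
    let mut conn = pool
        .acquire()
        .await
        .map_err(|err| (StatusCode::INTERNAL_SERVER_ERROR, err.to_string()))?;

    // Inline query stands in for sqlx::query_file!("queries/records/new.sql", ...)
    // so the sketch compiles without the query file; the schema is assumed.
    sqlx::query("INSERT INTO records (cache_name, max_downloads) VALUES (?, ?)")
        .bind(cache_name)
        .bind(max_downloads)
        .execute(&mut *conn)
        .await
        .map_err(|err| (StatusCode::INTERNAL_SERVER_ERROR, err.to_string()))?;

    Ok(())
}

In an axum handler returning Result<impl IntoResponse, (StatusCode, String)>, a
call such as insert_record(&state.pool, &cache_name, 5).await? then short-circuits
with a 500 on failure, which is the same shape of error the patched upload_to_zip
already returns from cache::write_to_cache.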