diff --git a/queries/records/new_record.sql b/queries/records/new_record.sql index ac54e0e..148c17c 100644 --- a/queries/records/new_record.sql +++ b/queries/records/new_record.sql @@ -1,2 +1,7 @@ INSERT INTO records (cache_name, max_downloads) -VALUES (?, ?); +VALUES (?, ?) +RETURNING + cache_name, + uploaded AS "uploaded: _", + downloads AS "downloads: i32", + max_downloads AS "max_downloads: i32"; diff --git a/src/cache.rs b/src/cache.rs deleted file mode 100644 index 1a78458..0000000 --- a/src/cache.rs +++ /dev/null @@ -1,38 +0,0 @@ -use crate::state::AppState; - -use serde::Serialize; -use tokio::io::AsyncReadExt; - -use std::io; - -use std::collections::HashMap; - -pub async fn write_to_cache<T, Y>(records: &HashMap<T, Y>) -> io::Result<()> -where - T: Serialize, - Y: Serialize, -{ - let mut records_cache = tokio::fs::File::create(".cache/data").await.unwrap(); - - let mut buf: Vec<u8> = Vec::with_capacity(200); - bincode::serialize_into(&mut buf, records).map_err(io::Error::other)?; - - let bytes_written = tokio::io::copy(&mut buf.as_slice(), &mut records_cache).await?; - - tracing::debug!("state cache size: {}", bytes_written); - - Ok(()) -} - -pub async fn fetch_cache() -> AppState { - let records = if let Ok(file) = tokio::fs::File::open(".cache/data").await.as_mut() { - let mut buf: Vec<u8> = Vec::with_capacity(200); - file.read_to_end(&mut buf).await.unwrap(); - - bincode::deserialize_from(&mut buf.as_slice()).unwrap() - } else { - HashMap::new() - }; - - AppState::new(records) -} diff --git a/src/db.rs b/src/db.rs index c27b529..b1bf659 100644 --- a/src/db.rs +++ b/src/db.rs @@ -1,6 +1,5 @@ use chrono::{DateTime, Utc}; -#[derive(sqlx::Type)] pub struct CacheRecord { pub cache_name: String, pub uploaded: DateTime<Utc>, diff --git a/src/lib.rs b/src/lib.rs index 3a8f04e..50e6b16 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1,4 +1,3 @@ -pub mod cache; pub mod db; pub mod router; pub mod state; diff --git a/src/main.rs b/src/main.rs index a525e69..b11c713 100644 --- a/src/main.rs
+++ b/src/main.rs @@ -22,7 +22,7 @@ async fn main() -> io::Result<()> { // uses create_dir_all to create both .cache and serve inside it in one go util::make_dir(".cache/serve").await?; - let state = cache::fetch_cache().await; + let state = AppState::new(); sweeper::spawn(state.clone()); diff --git a/src/router/link.rs b/src/router/link.rs index 9c86f8a..6238a66 100644 --- a/src/router/link.rs +++ b/src/router/link.rs @@ -8,9 +8,10 @@ use axum_extra::TypedHeader; use reqwest::StatusCode; use crate::{ - templates::{self, DownloadLinkTemplate}, + db::CacheRecord, + templates::{DownloadLinkTemplate, DownloadsRemainingFragment}, util::headers::HxRequest, - AppState, AsyncRemoveRecord, + AppState, }; pub fn get_link_router() -> Router { @@ -22,26 +23,15 @@ pub fn get_link_router() -> Router { async fn link( axum::extract::Path(id): axum::extract::Path<String>, - State(mut state): State<AppState>, + State(state): State<AppState>, ) -> Result<DownloadLinkTemplate, Redirect> { - { - let mut records = state.records.lock().await; + let mut conn = state.pool.acquire().await.unwrap(); - if let Some(record) = records - .get_mut(&id) - .filter(|record| record.can_be_downloaded()) - { - return Ok(DownloadLinkTemplate { - id, - record: record.clone(), - }); - } - } - - // TODO: This....
- state.remove_record(&id).await.unwrap(); - - Err(Redirect::to("/404.html")) + sqlx::query_file_as!(CacheRecord, "queries/records/get_record.sql", id) + .fetch_one(&mut *conn) + .await + .map(|record| DownloadLinkTemplate { record }) + .map_err(|_| Redirect::to("/404.html")) } async fn link_delete( @@ -53,11 +43,8 @@ sqlx::query_file!("queries/records/remove_record.sql", id) .execute(&mut *conn) .await - .map_err(|err| (StatusCode::INTERNAL_SERVER_ERROR, err.to_string()))?; - - drop(conn); - - Ok(Html("".to_string())) + .map(|_| Html("".to_string())) + .map_err(|err| (StatusCode::INTERNAL_SERVER_ERROR, err.to_string())) } async fn remaining( @@ -72,12 +59,11 @@ )); } - let records = state.records.lock().await; + let mut conn = state.pool.acquire().await.unwrap(); - Ok(Html( - records - .get(&id) - .map(templates::get_downloads_remaining_text) - .unwrap_or_else(|| "?".to_string()), - )) + sqlx::query_file_as!(CacheRecord, "queries/records/get_record.sql", id) + .fetch_one(&mut *conn) + .await + .map(|record| DownloadsRemainingFragment { record }) + .map_err(|err| (StatusCode::NOT_FOUND, err.to_string())) } diff --git a/src/router/records.rs b/src/router/records.rs index 68f92f2..26c1bfd 100644 --- a/src/router/records.rs +++ b/src/router/records.rs @@ -6,13 +6,13 @@ use crate::{db::CacheRecordName, templates::LinkListTemplate, AppState}; pub fn get_records_router() -> Router { // Records views Router::new() - .route("/", get(records)) + // .route("/", get(records)) .route("/links", get(records_links)) } -pub(crate) async fn records(State(state): State<AppState>) -> impl IntoResponse { - Json(state.records.lock().await.clone()) -} +// pub(crate) async fn records(State(state): State<AppState>) -> impl IntoResponse { +// Json(state.records.lock().await.clone()) +// } // This function is to remain ugly until that time in which I properly hide // this behind some kind of authentication diff --git a/src/router/upload.rs b/src/router/upload.rs index
54f50e1..6677062 100644 --- a/src/router/upload.rs +++ b/src/router/upload.rs @@ -14,7 +14,7 @@ use tokio::io; use tokio_util::{compat::FuturesAsyncWriteCompatExt, io::StreamReader}; use tower_http::limit::RequestBodyLimitLayer; -use crate::{cache, templates::DownloadLinkFragment, util, AppState, UploadRecord}; +use crate::{db::CacheRecord, templates::DownloadLinkFragment, util, AppState}; pub fn get_upload_router() -> Router { // Upload needs a subrouter to increase the body limit @@ -28,10 +28,8 @@ pub fn get_upload_router() -> Router { async fn upload_to_zip( State(state): State<AppState>, - mut body: Multipart, + body: Multipart, ) -> Result<Response, (StatusCode, String)> { - tracing::debug!("{:?}", *state.records.lock().await); - let cache_name = util::get_random_name(10); let archive_path = Path::new(".cache/serve").join(format!("{}.zip", &cache_name)); @@ -40,26 +38,10 @@ async fn upload_to_zip( zip_body(&archive_path, body).await?; - let record = UploadRecord::new(archive_path); - let mut records = state.records.lock().await; + let mut conn = state.pool.acquire().await.unwrap(); - records.insert(cache_name.clone(), record.clone()); - - let records_cache = records.clone(); - // Manually drop the records mutex guard - drop(records); - - { - let mut conn = state.pool.acquire().await.unwrap(); - let cache_name = cache_name.clone(); - - sqlx::query_file!("queries/records/new_record.sql", cache_name, 5) - .execute(&mut *conn) - .await - .map_err(|err| (StatusCode::INTERNAL_SERVER_ERROR, err.to_string()))?; - } - - cache::write_to_cache(&records_cache) + let record = sqlx::query_file_as!(CacheRecord, "queries/records/new_record.sql", cache_name, 5) + .fetch_one(&mut *conn) .await .map_err(|err| (StatusCode::INTERNAL_SERVER_ERROR, err.to_string()))?; @@ -70,7 +52,7 @@ async fn upload_to_zip( ("Content-Type", "text/html"), ("HX-Push-Url", &format!("/link/{}", &id)), ], - DownloadLinkFragment { id, record }, + DownloadLinkFragment { record }, ); Ok(impl_response.into_response()) diff --git a/src/state.rs
b/src/state.rs index dcdb48b..903efd1 100644 --- a/src/state.rs +++ b/src/state.rs @@ -1,18 +1,11 @@ use std::{ - collections::{hash_map::Entry, HashMap}, - io::ErrorKind, path::{Path, PathBuf}, str::FromStr, - sync::Arc, }; -use async_trait::async_trait; use chrono::{DateTime, Duration, Utc}; use serde::{Deserialize, Serialize}; use sqlx::{sqlite::SqliteConnectOptions, SqlitePool}; -use tokio::sync::Mutex; - -use crate::cache; #[allow(dead_code)] #[derive(Debug, Clone, Serialize, Deserialize)] @@ -55,14 +48,12 @@ impl Default for UploadRecord { #[derive(Clone)] pub struct AppState { - pub records: Arc<Mutex<HashMap<String, UploadRecord>>>, pub pool: SqlitePool, } impl AppState { - pub fn new(records: HashMap<String, UploadRecord>) -> Self { + pub fn new() -> Self { Self { - records: Arc::new(Mutex::new(records)), pool: SqlitePool::connect_lazy_with( SqliteConnectOptions::from_str("sqlite:testing.db") .expect("Invalid Database String"), @@ -71,34 +62,8 @@ ) } } -#[async_trait] -pub trait AsyncRemoveRecord { - async fn remove_record(&mut self, id: &str) -> Result<(), std::io::Error>; -} - -#[async_trait] -impl AsyncRemoveRecord for AppState { - async fn remove_record(&mut self, id: &str) -> Result<(), std::io::Error> { - let mut records = self.records.lock().await; - records.remove_record(id).await - } -} - -#[async_trait] -impl AsyncRemoveRecord for HashMap<String, UploadRecord> { - async fn remove_record(&mut self, id: &str) -> Result<(), std::io::Error> { - match self.entry(id.to_string()) { - Entry::Occupied(entry) => { - tokio::fs::remove_file(&entry.get().file).await?; - entry.remove_entry(); - cache::write_to_cache(self).await?; - - Ok(()) - } - Entry::Vacant(_) => Err(std::io::Error::new( - ErrorKind::Other, - "No UploadRecord Found", - )), - } +impl Default for AppState { + fn default() -> Self { + Self::new() + } } diff --git a/src/util/sweeper.rs b/src/util/sweeper.rs index bc48184..a2c1c3e 100644 --- a/src/util/sweeper.rs +++ b/src/util/sweeper.rs @@ -1,22 +1,22 @@ use std::time::Duration; -use crate::state::{AppState,
AsyncRemoveRecord}; +use crate::state::AppState; /// Spawn a repeating task that will clean files periodically -pub fn spawn(state: AppState) { +pub fn spawn(_state: AppState) { tokio::spawn(async move { loop { tokio::time::sleep(Duration::from_secs(15 * 60)).await; tracing::info!("Cleaning Sweep!"); - let mut records = state.records.lock().await; - - for (key, record) in records.clone().into_iter() { - if !record.can_be_downloaded() { - tracing::info!("culling: {:?}", record); - records.remove_record(&key).await.unwrap(); - } - } + // let mut records = state.records.lock().await; + // + // for (key, record) in records.clone().into_iter() { + // if !record.can_be_downloaded() { + // tracing::info!("culling: {:?}", record); + // records.remove_record(&key).await.unwrap(); + // } + // } } }); } diff --git a/src/views/templates.rs b/src/views/templates.rs index 4ee7f5e..c6c94bf 100644 --- a/src/views/templates.rs +++ b/src/views/templates.rs @@ -1,6 +1,6 @@ use rinja_axum::Template; -use crate::UploadRecord; +use crate::db::CacheRecord; #[derive(Template)] #[template(path = "welcome.html")] @@ -15,7 +15,7 @@ impl WelcomeTemplate { } #[inline] -pub fn get_downloads_remaining_text(record: &UploadRecord) -> String { +pub fn get_downloads_remaining_text(record: &CacheRecord) -> String { let downloads_remaining = record.max_downloads - record.downloads; let plural = if downloads_remaining > 1 { "s" } else { "" }; @@ -28,21 +28,19 @@ pub fn get_downloads_remaining_text(record: &UploadRecord) -> String { #[derive(Template)] #[template(path = "link.html")] pub struct DownloadLinkTemplate { - pub id: String, - pub record: UploadRecord, + pub record: CacheRecord, } #[derive(Template)] #[template(path = "link.html", block = "content")] pub struct DownloadLinkFragment { - pub id: String, - pub record: UploadRecord, + pub record: CacheRecord, } #[derive(Template)] #[template(path = "link.html", block = "downloads_remaining")] pub struct DownloadsRemainingFragment { - pub record: 
UploadRecord, + pub record: CacheRecord, } #[derive(Template)] diff --git a/templates/link.html b/templates/link.html index 9be1b64..c61ca5a 100644 --- a/templates/link.html +++ b/templates/link.html @@ -4,12 +4,12 @@