Compare commits

..

4 commits

4 changed files with 13 additions and 18 deletions

View file

@@ -1,8 +1,8 @@
 -- Add up migration script here
 CREATE TABLE IF NOT EXISTS records (
     id INTEGER NOT NULL PRIMARY KEY,
+    cache_name TEXT NOT NULL UNIQUE,
     uploaded TEXT NOT NULL DEFAULT (datetime('now')),
-    file_path TEXT NOT NULL,
     downloads INTEGER NOT NULL DEFAULT 0,
     max_downloads INTEGER NOT NULL
 ) STRICT;

View file

@@ -1,3 +1,3 @@
 UPDATE record
 SET downloads = downloads + 1
-WHERE id = ?
+WHERE cache_name = ?

View file

@@ -1,2 +1,2 @@
-INSERT INTO records (file_path, max_downloads)
+INSERT INTO records (cache_name, max_downloads)
 VALUES (?, ?);

View file

@@ -13,7 +13,6 @@ use sanitize_filename_reader_friendly::sanitize;
 use tokio::io;
 use tokio_util::{compat::FuturesAsyncWriteCompatExt, io::StreamReader};
 use tower_http::limit::RequestBodyLimitLayer;
-use tracing::debug;

 use crate::{askama::DownloadLinkFragment, cache, util, AppState, UploadRecord};
@@ -77,26 +76,22 @@ async fn upload_to_zip(
     let record = UploadRecord::new(archive_path);
     let mut records = state.records.lock().await;

-    {
-        let mut conn = state.pool.acquire().await.unwrap();
-        let path = record.file.clone().into_os_string().into_string().unwrap();
-        let id = sqlx::query_file!("queries/records/new.sql", path, 5)
-            .execute(&mut *conn)
-            .await
-            .unwrap()
-            .last_insert_rowid();
-        // TODO: Looks like I actually gotta store cache_name lmfao
-        debug!("Saved Record {id}");
-    }
     records.insert(cache_name.clone(), record.clone());
     let records_cache = records.clone();

     // Manually drop the records mutex guard
     drop(records);

+    {
+        let mut conn = state.pool.acquire().await.unwrap();
+        let cache_name = cache_name.clone();
+        sqlx::query_file!("queries/records/new.sql", cache_name, 5)
+            .execute(&mut *conn)
+            .await
+            .map_err(|err| (StatusCode::INTERNAL_SERVER_ERROR, err.to_string()))?;
+    }
     cache::write_to_cache(&records_cache)
         .await
         .map_err(|err| (StatusCode::INTERNAL_SERVER_ERROR, err.to_string()))?;