Compare commits: e44d80ee4c...a629485be9 (1 commit, a629485be9)

4 changed files with 18 additions and 13 deletions
Migration, up script:

@@ -1,8 +1,8 @@
 -- Add up migration script here
 CREATE TABLE IF NOT EXISTS records (
     id INTEGER NOT NULL PRIMARY KEY,
-    cache_name TEXT NOT NULL UNIQUE,
     uploaded TEXT NOT NULL DEFAULT (datetime('now')),
+    file_path TEXT NOT NULL,
     downloads INTEGER NOT NULL DEFAULT 0,
     max_downloads INTEGER NOT NULL
 ) STRICT;
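For orientation, a minimal read-side sketch of this table in Rust with sqlx, assuming the post-commit schema shown above; `RecordRow`, `fetch_record`, and the SELECT are illustrative names, not code from this repository or this commit.

```rust
use sqlx::SqlitePool;

// Hypothetical mapping of one row of the `records` table
// (post-commit schema: id, uploaded, file_path, downloads, max_downloads).
#[derive(Debug, sqlx::FromRow)]
struct RecordRow {
    id: i64,
    uploaded: String, // TEXT, defaults to datetime('now')
    file_path: String,
    downloads: i64,
    max_downloads: i64,
}

// Fetch a single record by its rowid.
async fn fetch_record(pool: &SqlitePool, id: i64) -> Result<RecordRow, sqlx::Error> {
    sqlx::query_as::<_, RecordRow>(
        "SELECT id, uploaded, file_path, downloads, max_downloads FROM records WHERE id = ?",
    )
    .bind(id)
    .fetch_one(pool)
    .await
}
```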
Download-counter query:

@@ -1,3 +1,3 @@
 UPDATE record
 SET downloads = downloads + 1
-WHERE cache_name = ?
+WHERE id = ?
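A hedged sketch of driving the updated counter from Rust, keyed by rowid as the new WHERE clause expects. `bump_download_count` and the inline SQL are assumptions; note the captured query reads `UPDATE record`, while the migration creates a table named `records`, so the sketch uses `records`.

```rust
use sqlx::SqlitePool;

// Hypothetical helper: increment the download counter for one record,
// addressed by rowid (matching the post-commit WHERE id = ? form).
async fn bump_download_count(pool: &SqlitePool, id: i64) -> Result<(), sqlx::Error> {
    sqlx::query("UPDATE records SET downloads = downloads + 1 WHERE id = ?")
        .bind(id)
        .execute(pool)
        .await?;
    Ok(())
}
```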
Record-insert query (likely the queries/records/new.sql referenced from the handler below):

@@ -1,2 +1,2 @@
-INSERT INTO records (cache_name, max_downloads)
+INSERT INTO records (file_path, max_downloads)
 VALUES (?, ?);
Rust upload handler, imports:

@@ -13,6 +13,7 @@ use sanitize_filename_reader_friendly::sanitize;
 use tokio::io;
 use tokio_util::{compat::FuturesAsyncWriteCompatExt, io::StreamReader};
 use tower_http::limit::RequestBodyLimitLayer;
+use tracing::debug;
 
 use crate::{askama::DownloadLinkFragment, cache, util, AppState, UploadRecord};
 
Rust upload handler, upload_to_zip:

@@ -76,22 +77,26 @@ async fn upload_to_zip(
     let record = UploadRecord::new(archive_path);
     let mut records = state.records.lock().await;
 
+    {
+        let mut conn = state.pool.acquire().await.unwrap();
+        let path = record.file.clone().into_os_string().into_string().unwrap();
+
+        let id = sqlx::query_file!("queries/records/new.sql", path, 5)
+            .execute(&mut *conn)
+            .await
+            .unwrap()
+            .last_insert_rowid();
+
+        // TODO: Looks like I actually gotta store cache_name lmfao
+        debug!("Saved Record {id}");
+    }
+
     records.insert(cache_name.clone(), record.clone());
 
     let records_cache = records.clone();
     // Manually drop the records mutex guard
     drop(records);
 
-    {
-        let mut conn = state.pool.acquire().await.unwrap();
-        let cache_name = cache_name.clone();
-
-        sqlx::query_file!("queries/records/new.sql", cache_name, 5)
-            .execute(&mut *conn)
-            .await
-            .map_err(|err| (StatusCode::INTERNAL_SERVER_ERROR, err.to_string()))?;
-    }
-
     cache::write_to_cache(&records_cache)
         .await
         .map_err(|err| (StatusCode::INTERNAL_SERVER_ERROR, err.to_string()))?;
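One possible reading of the TODO above, sketched rather than taken from the repository: persist `cache_name` alongside `file_path`, and keep the database round-trip outside the `records` mutex so the lock is not held across an `.await`. The helper name `save_record`, the inline SQL, and the presence of a `cache_name` column (which a later migration would have to re-add) are all assumptions for illustration.

```rust
use axum::http::StatusCode;
use sqlx::SqlitePool;

// Hypothetical helper: insert a record and return its rowid.
// Assumes a schema with both cache_name and file_path columns.
async fn save_record(
    pool: &SqlitePool,
    cache_name: &str,
    file_path: &str,
    max_downloads: i64,
) -> Result<i64, (StatusCode, String)> {
    let mut conn = pool
        .acquire()
        .await
        .map_err(|err| (StatusCode::INTERNAL_SERVER_ERROR, err.to_string()))?;

    let id = sqlx::query(
        "INSERT INTO records (cache_name, file_path, max_downloads) VALUES (?, ?, ?)",
    )
    .bind(cache_name)
    .bind(file_path)
    .bind(max_downloads)
    .execute(&mut *conn)
    .await
    .map_err(|err| (StatusCode::INTERNAL_SERVER_ERROR, err.to_string()))?
    .last_insert_rowid();

    Ok(id)
}
```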