Compare commits

..

5 commits

Author SHA1 Message Date
5f4158e27d feat: begone memory map 2024-11-21 16:15:33 -08:00
87f68efd61 feat: typed get queries 2024-11-21 16:00:19 -08:00
a817502793 feat: delete no longer touches records 2024-11-21 15:30:08 -08:00
72210b211e chore: update .sqlx 2024-11-21 14:33:41 -08:00
07273fad14 feat: db downloads 2024-11-21 14:08:51 -08:00
18 changed files with 133 additions and 220 deletions

View file

@ -0,0 +1,12 @@
{
"db_name": "SQLite",
"query": "UPDATE records\nSET downloads = downloads + 1\nWHERE\n cache_name = ?\n AND downloads < max_downloads\n AND julianday('now') - julianday(uploaded) > 5;\n",
"describe": {
"columns": [],
"parameters": {
"Right": 1
},
"nullable": []
},
"hash": "a2f3905f8f8e14c95404f88f2e60dcf13e7b7caf5307bd19240655621251f7c9"
}

View file

@ -1,8 +1,6 @@
UPDATE record UPDATE records
SET downloads = downloads + 1 SET downloads = downloads + 1
WHERE cache_name = ? WHERE
RETURNING cache_name = ?
cache_name, AND downloads < max_downloads
uploaded, AND julianday('now') - julianday(uploaded) > 5;
downloads,
max_downloads;

View file

@ -1,7 +1,7 @@
SELECT SELECT
cache_name, cache_name,
uploaded, uploaded AS "uploaded: _",
downloads, downloads AS "downloads: i32",
max_downloads max_downloads AS "max_downloads: i32"
FROM records FROM records
WHERE cache_name = ?; WHERE cache_name = ?;

View file

@ -1,6 +1,6 @@
SELECT SELECT
cache_name, cache_name,
uploaded, uploaded AS "uploaded: _",
downloads, downloads AS "downloads: i32",
max_downloads max_downloads AS "max_downloads: i32"
FROM records; FROM records;

View file

@ -1,7 +1,7 @@
SELECT SELECT
cache_name, cache_name,
uploaded, uploaded AS "uploaded: _",
downloads, downloads AS "downloads: i32",
max_downloads max_downloads AS "max_downloads: i32"
FROM records FROM records
LIMIT ? OFFSET ?; LIMIT ? OFFSET ?;

View file

@ -1,2 +1,7 @@
INSERT INTO records (cache_name, max_downloads) INSERT INTO records (cache_name, max_downloads)
VALUES (?, ?); VALUES (?, ?)
RETURNING
cache_name,
uploaded AS "uploaded: _",
downloads AS "downloads: i32",
max_downloads AS "max_downloads: i32";

View file

@ -1,38 +0,0 @@
use crate::state::AppState;
use serde::Serialize;
use tokio::io::AsyncReadExt;
use std::io;
use std::collections::HashMap;
/// Serialize `records` with bincode and persist them to `.cache/data`.
///
/// All failure modes surface as `io::Error`: file creation and the final
/// write propagate directly, and bincode serialization errors are adapted
/// via `io::Error::other` so callers deal with a single error type.
pub async fn write_to_cache<T, Y>(records: &HashMap<T, Y>) -> io::Result<()>
where
    T: Serialize,
    Y: Serialize,
{
    // Propagate creation failures with `?` instead of panicking — the
    // signature already promises io::Result, so an unwrap here was a bug.
    let mut records_cache = tokio::fs::File::create(".cache/data").await?;
    // Serialize into an in-memory buffer first; 200 bytes is a heuristic
    // starting capacity, the Vec grows as needed.
    let mut buf: Vec<u8> = Vec::with_capacity(200);
    bincode::serialize_into(&mut buf, records).map_err(io::Error::other)?;
    let bytes_written = tokio::io::copy(&mut buf.as_slice(), &mut records_cache).await?;
    tracing::debug!("state cache size: {}", bytes_written);
    Ok(())
}
/// Rebuild the application state from the on-disk cache at `.cache/data`.
///
/// When the cache file is missing (or cannot be opened) an empty record map
/// is used instead. Note: a cache file that exists but is unreadable or
/// corrupt still panics via `unwrap`, preserving the existing fail-fast
/// startup behavior.
pub async fn fetch_cache() -> AppState {
    let records = match tokio::fs::File::open(".cache/data").await {
        Ok(mut cache_file) => {
            let mut contents: Vec<u8> = Vec::with_capacity(200);
            cache_file.read_to_end(&mut contents).await.unwrap();
            bincode::deserialize_from(&mut contents.as_slice()).unwrap()
        }
        // No cache yet (first run) — start from an empty map.
        Err(_) => HashMap::new(),
    };
    AppState::new(records)
}

View file

@ -1,6 +1,5 @@
use chrono::{DateTime, Utc}; use chrono::{DateTime, Utc};
#[derive(sqlx::Type)]
pub struct CacheRecord { pub struct CacheRecord {
pub cache_name: String, pub cache_name: String,
pub uploaded: DateTime<Utc>, pub uploaded: DateTime<Utc>,

View file

@ -1,4 +1,3 @@
pub mod cache;
pub mod db; pub mod db;
pub mod router; pub mod router;
pub mod state; pub mod state;
@ -8,3 +7,7 @@ pub mod views;
pub use router::*; pub use router::*;
pub use state::*; pub use state::*;
pub use views::*; pub use views::*;
use std::{path::PathBuf, sync::LazyLock};
pub static CACHE_DIR: LazyLock<PathBuf> = LazyLock::new(|| PathBuf::from("./.cache/serve"));

View file

@ -22,7 +22,7 @@ async fn main() -> io::Result<()> {
// uses create_dir_all to create both .cache and serve inside it in one go // uses create_dir_all to create both .cache and serve inside it in one go
util::make_dir(".cache/serve").await?; util::make_dir(".cache/serve").await?;
let state = cache::fetch_cache().await; let state = AppState::new();
sweeper::spawn(state.clone()); sweeper::spawn(state.clone());

View file

@ -8,7 +8,7 @@ use axum::{
use reqwest::StatusCode; use reqwest::StatusCode;
use tokio_util::io::ReaderStream; use tokio_util::io::ReaderStream;
use crate::{AppState, AsyncRemoveRecord}; use crate::{AppState, CACHE_DIR};
pub fn get_download_router() -> Router<AppState> { pub fn get_download_router() -> Router<AppState> {
Router::new().route("/:id", get(download)) Router::new().route("/:id", get(download))
@ -18,23 +18,25 @@ async fn download(
axum::extract::Path(id): axum::extract::Path<String>, axum::extract::Path(id): axum::extract::Path<String>,
State(state): State<AppState>, State(state): State<AppState>,
) -> Result<axum::response::Response, (StatusCode, String)> { ) -> Result<axum::response::Response, (StatusCode, String)> {
{ let mut conn = state.pool.acquire().await.unwrap();
let mut records = state.records.lock().await;
if let Some(record) = records
.get_mut(&id)
.filter(|record| record.can_be_downloaded())
{
record.downloads += 1;
let file = tokio::fs::File::open(&record.file).await.unwrap(); let rows_affected = sqlx::query_file!("queries/records/get_and_update.sql", id)
.execute(&mut *conn)
.await
.map_err(|err| (StatusCode::INTERNAL_SERVER_ERROR, err.to_string()))?
.rows_affected();
return Ok(axum::response::Response::builder() drop(conn);
.header("Content-Type", "application/zip")
.body(Body::from_stream(ReaderStream::new(file))) if rows_affected > 0 {
.unwrap()); let file = tokio::fs::File::open(CACHE_DIR.join(id))
} else { .await
records.remove_record(&id).await.unwrap() .map_err(|err| (StatusCode::INTERNAL_SERVER_ERROR, err.to_string()))?;
}
return Ok(axum::response::Response::builder()
.header("Content-Type", "application/zip")
.body(Body::from_stream(ReaderStream::new(file)))
.unwrap());
} }
Ok(Redirect::to("/404.html").into_response()) Ok(Redirect::to("/404.html").into_response())

View file

@ -8,9 +8,10 @@ use axum_extra::TypedHeader;
use reqwest::StatusCode; use reqwest::StatusCode;
use crate::{ use crate::{
templates::{self, DownloadLinkTemplate}, db::CacheRecord,
templates::{DownloadLinkTemplate, DownloadsRemainingFragment},
util::headers::HxRequest, util::headers::HxRequest,
AppState, AsyncRemoveRecord, AppState,
}; };
pub fn get_link_router() -> Router<AppState> { pub fn get_link_router() -> Router<AppState> {
@ -22,47 +23,28 @@ pub fn get_link_router() -> Router<AppState> {
async fn link( async fn link(
axum::extract::Path(id): axum::extract::Path<String>, axum::extract::Path(id): axum::extract::Path<String>,
State(mut state): State<AppState>, State(state): State<AppState>,
) -> Result<impl IntoResponse, Redirect> { ) -> Result<impl IntoResponse, Redirect> {
{ let mut conn = state.pool.acquire().await.unwrap();
let mut records = state.records.lock().await;
if let Some(record) = records sqlx::query_file_as!(CacheRecord, "queries/records/get_record.sql", id)
.get_mut(&id) .fetch_one(&mut *conn)
.filter(|record| record.can_be_downloaded()) .await
{ .map(|record| DownloadLinkTemplate { record })
return Ok(DownloadLinkTemplate { .map_err(|_| Redirect::to("/404.html"))
id,
record: record.clone(),
});
}
}
// TODO: This....
state.remove_record(&id).await.unwrap();
Err(Redirect::to("/404.html"))
} }
async fn link_delete( async fn link_delete(
axum::extract::Path(id): axum::extract::Path<String>, axum::extract::Path(id): axum::extract::Path<String>,
State(mut state): State<AppState>, State(state): State<AppState>,
) -> Result<Html<String>, (StatusCode, String)> { ) -> Result<Html<String>, (StatusCode, String)> {
state let mut conn = state.pool.acquire().await.unwrap();
.remove_record(&id)
sqlx::query_file!("queries/records/remove_record.sql", id)
.execute(&mut *conn)
.await .await
.map_err(|err| (StatusCode::INTERNAL_SERVER_ERROR, err.to_string()))?; .map(|_| Html("".to_string()))
.map_err(|err| (StatusCode::INTERNAL_SERVER_ERROR, err.to_string()))
{
let mut conn = state.pool.acquire().await.unwrap();
sqlx::query_file!("queries/records/remove_record.sql", id)
.execute(&mut *conn)
.await
.map_err(|err| (StatusCode::INTERNAL_SERVER_ERROR, err.to_string()))?;
}
Ok(Html("".to_string()))
} }
async fn remaining( async fn remaining(
@ -77,12 +59,11 @@ async fn remaining(
)); ));
} }
let records = state.records.lock().await; let mut conn = state.pool.acquire().await.unwrap();
Ok(Html( sqlx::query_file_as!(CacheRecord, "queries/records/get_record.sql", id)
records .fetch_one(&mut *conn)
.get(&id) .await
.map(templates::get_downloads_remaining_text) .map(|record| DownloadsRemainingFragment { record })
.unwrap_or_else(|| "?".to_string()), .map_err(|err| (StatusCode::NOT_FOUND, err.to_string()))
))
} }

View file

@ -6,13 +6,13 @@ use crate::{db::CacheRecordName, templates::LinkListTemplate, AppState};
pub fn get_records_router() -> Router<AppState> { pub fn get_records_router() -> Router<AppState> {
// Records views // Records views
Router::new() Router::new()
.route("/", get(records)) // .route("/", get(records))
.route("/links", get(records_links)) .route("/links", get(records_links))
} }
pub(crate) async fn records(State(state): State<AppState>) -> impl IntoResponse { // pub(crate) async fn records(State(state): State<AppState>) -> impl IntoResponse {
Json(state.records.lock().await.clone()) // Json(state.records.lock().await.clone())
} // }
// This function is to remain ugly until that time in which I properly hide // This function is to remain ugly until that time in which I properly hide
// this behind some kind of authentication // this behind some kind of authentication

View file

@ -14,7 +14,7 @@ use tokio::io;
use tokio_util::{compat::FuturesAsyncWriteCompatExt, io::StreamReader}; use tokio_util::{compat::FuturesAsyncWriteCompatExt, io::StreamReader};
use tower_http::limit::RequestBodyLimitLayer; use tower_http::limit::RequestBodyLimitLayer;
use crate::{cache, templates::DownloadLinkFragment, util, AppState, UploadRecord}; use crate::{db::CacheRecord, templates::DownloadLinkFragment, util, AppState};
pub fn get_upload_router() -> Router<AppState> { pub fn get_upload_router() -> Router<AppState> {
// Upload needs a subrouter to increase the body limit // Upload needs a subrouter to increase the body limit
@ -28,21 +28,44 @@ pub fn get_upload_router() -> Router<AppState> {
async fn upload_to_zip( async fn upload_to_zip(
State(state): State<AppState>, State(state): State<AppState>,
mut body: Multipart, body: Multipart,
) -> Result<Response, (StatusCode, String)> { ) -> Result<Response, (StatusCode, String)> {
tracing::debug!("{:?}", *state.records.lock().await);
let cache_name = util::get_random_name(10); let cache_name = util::get_random_name(10);
let archive_path = Path::new(".cache/serve").join(format!("{}.zip", &cache_name)); let archive_path = Path::new(".cache/serve").join(format!("{}.zip", &cache_name));
tracing::debug!("Zipping: {:?}", &archive_path); tracing::debug!("Zipping: {:?}", &archive_path);
let mut archive = tokio::fs::File::create(&archive_path) zip_body(&archive_path, body).await?;
let mut conn = state.pool.acquire().await.unwrap();
let record = sqlx::query_file_as!(CacheRecord, "queries/records/new_record.sql", cache_name, 5)
.fetch_one(&mut *conn)
.await
.map_err(|err| (StatusCode::INTERNAL_SERVER_ERROR, err.to_string()))?;
let id = cache_name;
let impl_response = (
StatusCode::OK,
[
("Content-Type", "text/html"),
("HX-Push-Url", &format!("/link/{}", &id)),
],
DownloadLinkFragment { record },
);
Ok(impl_response.into_response())
}
async fn zip_body(
archive_path: &std::path::PathBuf,
mut body: Multipart,
) -> Result<(), (StatusCode, String)> {
let mut archive = tokio::fs::File::create(archive_path)
.await .await
.map_err(|err| (StatusCode::INTERNAL_SERVER_ERROR, err.to_string()))?; .map_err(|err| (StatusCode::INTERNAL_SERVER_ERROR, err.to_string()))?;
let mut writer = ZipFileWriter::with_tokio(&mut archive); let mut writer = ZipFileWriter::with_tokio(&mut archive);
while let Some(field) = body.next_field().await.unwrap() { while let Some(field) = body.next_field().await.unwrap() {
let file_name = match field.file_name() { let file_name = match field.file_name() {
Some(file_name) => sanitize(file_name), Some(file_name) => sanitize(file_name),
@ -72,41 +95,6 @@ async fn upload_to_zip(
.await .await
.map_err(|err| (StatusCode::INTERNAL_SERVER_ERROR, err.to_string()))?; .map_err(|err| (StatusCode::INTERNAL_SERVER_ERROR, err.to_string()))?;
} }
let record = UploadRecord::new(archive_path);
let mut records = state.records.lock().await;
records.insert(cache_name.clone(), record.clone());
let records_cache = records.clone();
// Manually drop the records mutex guard
drop(records);
{
let mut conn = state.pool.acquire().await.unwrap();
let cache_name = cache_name.clone();
sqlx::query_file!("queries/records/new_record.sql", cache_name, 5)
.execute(&mut *conn)
.await
.map_err(|err| (StatusCode::INTERNAL_SERVER_ERROR, err.to_string()))?;
}
cache::write_to_cache(&records_cache)
.await
.map_err(|err| (StatusCode::INTERNAL_SERVER_ERROR, err.to_string()))?;
writer.close().await.unwrap(); writer.close().await.unwrap();
Ok(())
let id = cache_name;
let impl_response = (
StatusCode::OK,
[
("Content-Type", "text/html"),
("HX-Push-Url", &format!("/link/{}", &id)),
],
DownloadLinkFragment { id, record },
);
Ok(impl_response.into_response())
} }

View file

@ -1,18 +1,11 @@
use std::{ use std::{
collections::{hash_map::Entry, HashMap},
io::ErrorKind,
path::{Path, PathBuf}, path::{Path, PathBuf},
str::FromStr, str::FromStr,
sync::Arc,
}; };
use async_trait::async_trait;
use chrono::{DateTime, Duration, Utc}; use chrono::{DateTime, Duration, Utc};
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use sqlx::{sqlite::SqliteConnectOptions, SqlitePool}; use sqlx::{sqlite::SqliteConnectOptions, SqlitePool};
use tokio::sync::Mutex;
use crate::cache;
#[allow(dead_code)] #[allow(dead_code)]
#[derive(Debug, Clone, Serialize, Deserialize)] #[derive(Debug, Clone, Serialize, Deserialize)]
@ -55,14 +48,12 @@ impl Default for UploadRecord {
#[derive(Clone)] #[derive(Clone)]
pub struct AppState { pub struct AppState {
pub records: Arc<Mutex<HashMap<String, UploadRecord>>>,
pub pool: SqlitePool, pub pool: SqlitePool,
} }
impl AppState { impl AppState {
pub fn new(records: HashMap<String, UploadRecord>) -> Self { pub fn new() -> Self {
Self { Self {
records: Arc::new(Mutex::new(records)),
pool: SqlitePool::connect_lazy_with( pool: SqlitePool::connect_lazy_with(
SqliteConnectOptions::from_str("sqlite:testing.db") SqliteConnectOptions::from_str("sqlite:testing.db")
.expect("Invalid Database String"), .expect("Invalid Database String"),
@ -71,34 +62,8 @@ impl AppState {
} }
} }
#[async_trait] impl Default for AppState {
pub trait AsyncRemoveRecord { fn default() -> Self {
async fn remove_record(&mut self, id: &str) -> Result<(), std::io::Error>; Self::new()
}
#[async_trait]
impl AsyncRemoveRecord for AppState {
async fn remove_record(&mut self, id: &str) -> Result<(), std::io::Error> {
let mut records = self.records.lock().await;
records.remove_record(id).await
}
}
#[async_trait]
impl AsyncRemoveRecord for HashMap<String, UploadRecord> {
async fn remove_record(&mut self, id: &str) -> Result<(), std::io::Error> {
match self.entry(id.to_string()) {
Entry::Occupied(entry) => {
tokio::fs::remove_file(&entry.get().file).await?;
entry.remove_entry();
cache::write_to_cache(self).await?;
Ok(())
}
Entry::Vacant(_) => Err(std::io::Error::new(
ErrorKind::Other,
"No UploadRecord Found",
)),
}
} }
} }

View file

@ -1,22 +1,22 @@
use std::time::Duration; use std::time::Duration;
use crate::state::{AppState, AsyncRemoveRecord}; use crate::state::AppState;
/// Spawn a repeating task that will clean files periodically /// Spawn a repeating task that will clean files periodically
pub fn spawn(state: AppState) { pub fn spawn(_state: AppState) {
tokio::spawn(async move { tokio::spawn(async move {
loop { loop {
tokio::time::sleep(Duration::from_secs(15 * 60)).await; tokio::time::sleep(Duration::from_secs(15 * 60)).await;
tracing::info!("Cleaning Sweep!"); tracing::info!("Cleaning Sweep!");
let mut records = state.records.lock().await; // let mut records = state.records.lock().await;
//
for (key, record) in records.clone().into_iter() { // for (key, record) in records.clone().into_iter() {
if !record.can_be_downloaded() { // if !record.can_be_downloaded() {
tracing::info!("culling: {:?}", record); // tracing::info!("culling: {:?}", record);
records.remove_record(&key).await.unwrap(); // records.remove_record(&key).await.unwrap();
} // }
} // }
} }
}); });
} }

View file

@ -1,6 +1,6 @@
use rinja_axum::Template; use rinja_axum::Template;
use crate::UploadRecord; use crate::db::CacheRecord;
#[derive(Template)] #[derive(Template)]
#[template(path = "welcome.html")] #[template(path = "welcome.html")]
@ -15,7 +15,7 @@ impl WelcomeTemplate {
} }
#[inline] #[inline]
pub fn get_downloads_remaining_text(record: &UploadRecord) -> String { pub fn get_downloads_remaining_text(record: &CacheRecord) -> String {
let downloads_remaining = record.max_downloads - record.downloads; let downloads_remaining = record.max_downloads - record.downloads;
let plural = if downloads_remaining > 1 { "s" } else { "" }; let plural = if downloads_remaining > 1 { "s" } else { "" };
@ -28,21 +28,19 @@ pub fn get_downloads_remaining_text(record: &UploadRecord) -> String {
#[derive(Template)] #[derive(Template)]
#[template(path = "link.html")] #[template(path = "link.html")]
pub struct DownloadLinkTemplate { pub struct DownloadLinkTemplate {
pub id: String, pub record: CacheRecord,
pub record: UploadRecord,
} }
#[derive(Template)] #[derive(Template)]
#[template(path = "link.html", block = "content")] #[template(path = "link.html", block = "content")]
pub struct DownloadLinkFragment { pub struct DownloadLinkFragment {
pub id: String, pub record: CacheRecord,
pub record: UploadRecord,
} }
#[derive(Template)] #[derive(Template)]
#[template(path = "link.html", block = "downloads_remaining")] #[template(path = "link.html", block = "downloads_remaining")]
pub struct DownloadsRemainingFragment { pub struct DownloadsRemainingFragment {
pub record: UploadRecord, pub record: CacheRecord,
} }
#[derive(Template)] #[derive(Template)]

View file

@ -4,12 +4,12 @@
<div class="form-wrapper"> <div class="form-wrapper">
<div class="column-container"> <div class="column-container">
<div class="link-wrapper"> <div class="link-wrapper">
<a id="link" href="/download/{{ id }}">Download Now!</a> <a id="link" href="/download/{{ record.cache_name }}">Download Now!</a>
</div> </div>
<div <div
class="link-wrapper" class="link-wrapper"
hx-get="/link/{{ id }}/remaining" hx-get="/link/{{ record.cache_name }}/remaining"
hx-trigger="click from:#link delay:0.2s, every 60s" hx-trigger="click from:#link delay:0.2s, every 60s"
> >
{% block downloads_remaining %} {% block downloads_remaining %}