feat: typed get queries
parent a817502793
commit 87f68efd61
4 changed files with 53 additions and 47 deletions
@@ -1,7 +1,7 @@
 SELECT
     cache_name,
-    uploaded,
-    downloads,
-    max_downloads
+    uploaded AS "uploaded: _",
+    downloads AS "downloads: i32",
+    max_downloads AS "max_downloads: i32"
 FROM records
 WHERE cache_name = ?;

@@ -1,6 +1,6 @@
 SELECT
     cache_name,
-    uploaded,
-    downloads,
-    max_downloads
+    uploaded AS "uploaded: _",
+    downloads AS "downloads: i32",
+    max_downloads AS "max_downloads: i32"
 FROM records;

@@ -1,7 +1,7 @@
 SELECT
     cache_name,
-    uploaded,
-    downloads,
-    max_downloads
+    uploaded AS "uploaded: _",
+    downloads AS "downloads: i32",
+    max_downloads AS "max_downloads: i32"
 FROM records
 LIMIT ? OFFSET ?;

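The AS "column: type" aliases introduced above are sqlx's type-override syntax: they tell the compile-time query macros which Rust type to decode each column into, and the "_" form defers to the type of the matching struct field when the query runs through one of the query_as!-style macros. As a minimal sketch only, assuming SQLite, a RecordRow struct, a time::OffsetDateTime timestamp, and a hypothetical file name queries/records/get_record.sql (none of which are confirmed by this commit), the single-record query could be consumed like this:

use sqlx::SqlitePool;
use time::OffsetDateTime;

// Assumed row shape for illustration; the real project has its own record type.
#[derive(Debug)]
struct RecordRow {
    cache_name: String,
    // `uploaded AS "uploaded: _"` lets the macro reuse this field's type as-is;
    // OffsetDateTime is an assumption here.
    uploaded: OffsetDateTime,
    // `AS "downloads: i32"` / `AS "max_downloads: i32"` force i32 instead of the
    // driver's default integer type.
    downloads: i32,
    max_downloads: i32,
}

// Hypothetical file name for the `WHERE cache_name = ?` query shown above.
async fn get_record(pool: &SqlitePool, cache_name: &str) -> sqlx::Result<Option<RecordRow>> {
    sqlx::query_file_as!(RecordRow, "queries/records/get_record.sql", cache_name)
        .fetch_optional(pool)
        .await
}

The same pattern would apply to the other two query files, with fetch_all instead of fetch_optional for the multi-row cases.
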
@@ -38,11 +38,52 @@ async fn upload_to_zip(
 
     tracing::debug!("Zipping: {:?}", &archive_path);
 
-    let mut archive = tokio::fs::File::create(&archive_path)
+    zip_body(&archive_path, body).await?;
+
+    let record = UploadRecord::new(archive_path);
+    let mut records = state.records.lock().await;
+
+    records.insert(cache_name.clone(), record.clone());
+
+    let records_cache = records.clone();
+    // Manually drop the records mutex guard
+    drop(records);
+
+    {
+        let mut conn = state.pool.acquire().await.unwrap();
+        let cache_name = cache_name.clone();
+
+        sqlx::query_file!("queries/records/new_record.sql", cache_name, 5)
+            .execute(&mut *conn)
+            .await
+            .map_err(|err| (StatusCode::INTERNAL_SERVER_ERROR, err.to_string()))?;
+    }
+
+    cache::write_to_cache(&records_cache)
+        .await
+        .map_err(|err| (StatusCode::INTERNAL_SERVER_ERROR, err.to_string()))?;
+
+    let id = cache_name;
+    let impl_response = (
+        StatusCode::OK,
+        [
+            ("Content-Type", "text/html"),
+            ("HX-Push-Url", &format!("/link/{}", &id)),
+        ],
+        DownloadLinkFragment { id, record },
+    );
+
+    Ok(impl_response.into_response())
+}
+
+async fn zip_body(
+    archive_path: &std::path::PathBuf,
+    mut body: Multipart,
+) -> Result<(), (StatusCode, String)> {
+    let mut archive = tokio::fs::File::create(archive_path)
         .await
         .map_err(|err| (StatusCode::INTERNAL_SERVER_ERROR, err.to_string()))?;
     let mut writer = ZipFileWriter::with_tokio(&mut archive);
 
     while let Some(field) = body.next_field().await.unwrap() {
         let file_name = match field.file_name() {
             Some(file_name) => sanitize(file_name),

@@ -72,41 +113,6 @@ async fn upload_to_zip(
             .await
             .map_err(|err| (StatusCode::INTERNAL_SERVER_ERROR, err.to_string()))?;
     }
-
-    let record = UploadRecord::new(archive_path);
-    let mut records = state.records.lock().await;
-
-    records.insert(cache_name.clone(), record.clone());
-
-    let records_cache = records.clone();
-    // Manually drop the records mutex guard
-    drop(records);
-
-    {
-        let mut conn = state.pool.acquire().await.unwrap();
-        let cache_name = cache_name.clone();
-
-        sqlx::query_file!("queries/records/new_record.sql", cache_name, 5)
-            .execute(&mut *conn)
-            .await
-            .map_err(|err| (StatusCode::INTERNAL_SERVER_ERROR, err.to_string()))?;
-    }
-
-    cache::write_to_cache(&records_cache)
-        .await
-        .map_err(|err| (StatusCode::INTERNAL_SERVER_ERROR, err.to_string()))?;
-
     writer.close().await.unwrap();
-
-    let id = cache_name;
-    let impl_response = (
-        StatusCode::OK,
-        [
-            ("Content-Type", "text/html"),
-            ("HX-Push-Url", &format!("/link/{}", &id)),
-        ],
-        DownloadLinkFragment { id, record },
-    );
-
-    Ok(impl_response.into_response())
+
+    Ok(())
 }
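A note on the locking in the handler above: records is a tokio mutex, and the guard is cloned and then dropped explicitly before the database insert and cache write, presumably so other requests can take the lock while those awaits run. A minimal sketch of the same pattern with simplified types and a hypothetical helper name (none of this is part of the commit):

use std::collections::HashMap;
use tokio::sync::Mutex;

// Hypothetical helper illustrating the pattern: update the shared map, take a
// snapshot, and release the guard before any further .await points.
async fn insert_and_snapshot(
    records: &Mutex<HashMap<String, u32>>,
    key: String,
    value: u32,
) -> HashMap<String, u32> {
    let snapshot = {
        let mut guard = records.lock().await;
        guard.insert(key, value);
        guard.clone()
    }; // the guard is dropped here, same effect as the explicit drop(records) above

    // Long-running awaits (database insert, cache write) would go here,
    // with the lock already released.
    snapshot
}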