diff --git a/queries/records/get_record.sql b/queries/records/get_record.sql
index 4773147..3480e84 100644
--- a/queries/records/get_record.sql
+++ b/queries/records/get_record.sql
@@ -1,7 +1,7 @@
 SELECT
     cache_name,
-    uploaded,
-    downloads,
-    max_downloads
+    uploaded AS "uploaded: _",
+    downloads AS "downloads: i32",
+    max_downloads AS "max_downloads: i32"
 FROM records
 WHERE cache_name = ?;
diff --git a/queries/records/get_records.sql b/queries/records/get_records.sql
index 1a62878..4e31200 100644
--- a/queries/records/get_records.sql
+++ b/queries/records/get_records.sql
@@ -1,6 +1,6 @@
 SELECT
     cache_name,
-    uploaded,
-    downloads,
-    max_downloads
+    uploaded AS "uploaded: _",
+    downloads AS "downloads: i32",
+    max_downloads AS "max_downloads: i32"
 FROM records;
diff --git a/queries/records/get_records_page.sql b/queries/records/get_records_page.sql
index 25e59b5..e0c24ad 100644
--- a/queries/records/get_records_page.sql
+++ b/queries/records/get_records_page.sql
@@ -1,7 +1,7 @@
 SELECT
     cache_name,
-    uploaded,
-    downloads,
-    max_downloads
+    uploaded AS "uploaded: _",
+    downloads AS "downloads: i32",
+    max_downloads AS "max_downloads: i32"
 FROM records
 LIMIT ? OFFSET ?;
diff --git a/src/router/upload.rs b/src/router/upload.rs
index 48305af..54f50e1 100644
--- a/src/router/upload.rs
+++ b/src/router/upload.rs
@@ -38,11 +38,52 @@ async fn upload_to_zip(
 
     tracing::debug!("Zipping: {:?}", &archive_path);
 
-    let mut archive = tokio::fs::File::create(&archive_path)
+    zip_body(&archive_path, body).await?;
+
+    let record = UploadRecord::new(archive_path);
+    let mut records = state.records.lock().await;
+
+    records.insert(cache_name.clone(), record.clone());
+
+    let records_cache = records.clone();
+    // Manually drop the records mutex guard
+    drop(records);
+
+    {
+        let mut conn = state.pool.acquire().await.unwrap();
+        let cache_name = cache_name.clone();
+
+        sqlx::query_file!("queries/records/new_record.sql", cache_name, 5)
+            .execute(&mut *conn)
+            .await
+            .map_err(|err| (StatusCode::INTERNAL_SERVER_ERROR, err.to_string()))?;
+    }
+
+    cache::write_to_cache(&records_cache)
+        .await
+        .map_err(|err| (StatusCode::INTERNAL_SERVER_ERROR, err.to_string()))?;
+
+    let id = cache_name;
+    let impl_response = (
+        StatusCode::OK,
+        [
+            ("Content-Type", "text/html"),
+            ("HX-Push-Url", &format!("/link/{}", &id)),
+        ],
+        DownloadLinkFragment { id, record },
+    );
+
+    Ok(impl_response.into_response())
+}
+
+// Streams the multipart fields into a ZIP archive at `archive_path`.
+// Takes `&Path` rather than `&PathBuf` so any path-like borrow works.
+async fn zip_body(
+    archive_path: &std::path::Path,
+    mut body: Multipart,
+) -> Result<(), (StatusCode, String)> {
+    let mut archive = tokio::fs::File::create(archive_path)
         .await
         .map_err(|err| (StatusCode::INTERNAL_SERVER_ERROR, err.to_string()))?;
 
     let mut writer = ZipFileWriter::with_tokio(&mut archive);
-
     while let Some(field) = body.next_field().await.unwrap() {
         let file_name = match field.file_name() {
             Some(file_name) => sanitize(file_name),
@@ -72,41 +113,12 @@ async fn upload_to_zip(
             .await
             .map_err(|err| (StatusCode::INTERNAL_SERVER_ERROR, err.to_string()))?;
     }
-
-    let record = UploadRecord::new(archive_path);
-    let mut records = state.records.lock().await;
-
-    records.insert(cache_name.clone(), record.clone());
-
-    let records_cache = records.clone();
-    // Manually drop the records mutex guard
-    drop(records);
-
-    {
-        let mut conn = state.pool.acquire().await.unwrap();
-        let cache_name = cache_name.clone();
-
-        sqlx::query_file!("queries/records/new_record.sql", cache_name, 5)
-            .execute(&mut *conn)
-            .await
-            .map_err(|err| (StatusCode::INTERNAL_SERVER_ERROR, err.to_string()))?;
-    }
-
-    cache::write_to_cache(&records_cache)
-        .await
-        .map_err(|err| (StatusCode::INTERNAL_SERVER_ERROR, err.to_string()))?;
-    writer.close().await.unwrap();
-
-    let id = cache_name;
-    let impl_response = (
-        StatusCode::OK,
-        [
-            ("Content-Type", "text/html"),
-            ("HX-Push-Url", &format!("/link/{}", &id)),
-        ],
-        DownloadLinkFragment { id, record },
-    );
-
-    Ok(impl_response.into_response())
+    // Finalize the ZIP: close() writes the central directory; dropping the
+    // writer without closing leaves a truncated, unreadable archive.
+    writer
+        .close()
+        .await
+        .map_err(|err| (StatusCode::INTERNAL_SERVER_ERROR, err.to_string()))?;
+    Ok(())
 }
 