diff --git a/Cargo.toml b/Cargo.toml index a5decab..39cf15a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "datatrash" -version = "1.1.5" +version = "1.1.6" authors = ["neri"] edition = "2021" diff --git a/src/config.rs b/src/config.rs index a7325a2..9379134 100644 --- a/src/config.rs +++ b/src/config.rs @@ -1,6 +1,7 @@ use std::env; use std::path::PathBuf; +use time::ext::NumericalDuration; use time::Duration; use tokio::fs; @@ -28,8 +29,8 @@ pub async fn get_config() -> Config { let max_file_size = env::var("UPLOAD_MAX_BYTES") .ok() .and_then(|variable| variable.parse().ok()) - .unwrap_or(8 * 1024 * 1024); - let max_file_size = (max_file_size != 0).then(|| max_file_size); + .or(Some(8 * 1024 * 1024)) + .filter(|&max_file_size| max_file_size != 0); let static_dir = PathBuf::from(env::var("STATIC_DIR").unwrap_or_else(|_| "./static".to_owned())); @@ -73,8 +74,8 @@ fn get_no_auth_limits() -> Option<NoAuthLimits> { (Some(auth_password), Some(max_time), Some(large_file_max_time), Some(large_file_size)) => { Some(NoAuthLimits { auth_password, - max_time: Duration::seconds(max_time as i64), - large_file_max_time: Duration::seconds(large_file_max_time as i64), + max_time: (max_time as i64).seconds(), + large_file_max_time: (large_file_max_time as i64).seconds(), large_file_size, }) } diff --git a/src/db.rs b/src/db.rs index de7bc74..1a7d49e 100644 --- a/src/db.rs +++ b/src/db.rs @@ -1,5 +1,6 @@ use sqlx::postgres::{PgPool, PgPoolOptions}; use std::env; +use time::ext::NumericalStdDuration; pub async fn setup_db() -> PgPool { let conn_url = &get_db_url(); @@ -7,7 +8,7 @@ pub async fn setup_db() -> PgPool { let pool = PgPoolOptions::new() .max_connections(5) - .acquire_timeout(std::time::Duration::from_secs(5)) + .acquire_timeout(5.std_seconds()) .connect(conn_url) .await .expect("could not create db pool"); diff --git a/src/multipart.rs b/src/multipart.rs index 9adfdac..dd0af39 100644 --- a/src/multipart.rs +++ b/src/multipart.rs @@ -7,8 +7,8 @@ use 
std::path::Path; use time::{Duration, OffsetDateTime}; use tokio::{fs::File, io::AsyncWriteExt}; -const MAX_UPLOAD_SECONDS: i64 = 31 * 24 * 60 * 60; -const DEFAULT_UPLOAD_SECONDS: u32 = 30 * 60; +const MAX_UPLOAD_DURATION: Duration = Duration::days(31); +const DEFAULT_UPLOAD_DURATION: Duration = Duration::minutes(30); pub(crate) struct UploadConfig { pub original_name: Option<String>, @@ -24,7 +24,7 @@ pub(crate) async fn parse_multipart( ) -> Result<UploadConfig, error::Error> { let mut original_name: Option<String> = None; let mut content_type: Option<Mime> = None; - let mut keep_for: Option<String> = None; + let mut keep_for_seconds: Option<String> = None; let mut delete_on_download = false; let mut password = None; let mut size = 0; @@ -34,7 +34,7 @@ pub(crate) async fn parse_multipart( let name = name.as_str(); match name { "keep_for" => { - keep_for = Some(parse_string(name, field).await?); + keep_for_seconds = Some(parse_string(name, field).await?); } "file" => { let (mime, uploaded_name) = get_file_metadata(&field); @@ -64,13 +64,13 @@ pub(crate) async fn parse_multipart( let content_type = content_type.ok_or_else(|| error::ErrorBadRequest("no content type found"))?; - let keep_for: u32 = keep_for + let keep_for = keep_for_seconds .map(|k| k.parse()) .transpose() .map_err(|e| error::ErrorBadRequest(format!("field keep_for is not a number: {}", e)))? 
- .unwrap_or(DEFAULT_UPLOAD_SECONDS); - let valid_duration = Duration::seconds(keep_for.into()); - let valid_till = OffsetDateTime::now_utc() + valid_duration; + .map(Duration::seconds) + .unwrap_or(DEFAULT_UPLOAD_DURATION); + let valid_till = OffsetDateTime::now_utc() + keep_for; let upload_config = UploadConfig { original_name, @@ -79,7 +79,7 @@ delete_on_download, }; - check_requirements(&upload_config, size, password, &valid_duration, config)?; + check_requirements(&upload_config, size, password, &keep_for, config)?; Ok(upload_config) } @@ -88,7 +88,7 @@ fn check_requirements( upload_config: &UploadConfig, size: u64, password: Option<String>, - valid_duration: &Duration, + keep_for: &Duration, config: &config::Config, ) -> Result<(), error::Error> { if let Some(original_name) = upload_config.original_name.as_ref() { @@ -97,17 +97,16 @@ fn check_requirements( } } - let valid_seconds = valid_duration.whole_seconds(); - if valid_seconds > MAX_UPLOAD_SECONDS { + if *keep_for > MAX_UPLOAD_DURATION { return Err(error::ErrorBadRequest(format!( - "maximum allowed validity is {} seconds, but you specified {} seconds", - MAX_UPLOAD_SECONDS, valid_seconds + "maximum allowed validity is {}, but you specified {}", + MAX_UPLOAD_DURATION, keep_for ))); } if let Some(no_auth_limits) = &config.no_auth_limits { - let requires_auth = valid_seconds > no_auth_limits.max_time.whole_seconds() - || valid_seconds > no_auth_limits.large_file_max_time.whole_seconds() + let requires_auth = *keep_for > no_auth_limits.max_time + || *keep_for > no_auth_limits.large_file_max_time && size > no_auth_limits.large_file_size; // hIGh sECUriTy paSsWoRD CHEck if requires_auth && password.as_ref() != Some(&no_auth_limits.auth_password) { diff --git a/src/rate_limit.rs b/src/rate_limit.rs index f62a781..57a802b 100644 --- a/src/rate_limit.rs +++ b/src/rate_limit.rs @@ -14,11 +14,6 @@ impl KeyExtractor for ForwardedPeerIpKeyExtractor { type Key = IpAddr; type 
KeyExtractionError = &'static str; - #[cfg(feature = "log")] - fn name(&self) -> &'static str { - "Forwarded peer IP" - } - fn extract(&self, req: &ServiceRequest) -> Result<Self::Key, Self::KeyExtractionError> { let forwarded_for = req.headers().get("x-forwarded-for"); if !self.proxied && forwarded_for.is_some() { @@ -44,8 +39,7 @@ impl KeyExtractor for ForwardedPeerIpKeyExtractor { ) } - #[cfg(feature = "log")] - fn key_name(&self, key: &Self::Key) -> Option<String> { - Some(key.to_string()) + fn response_error(&self, err: Self::KeyExtractionError) -> actix_web::Error { + actix_web::error::ErrorUnauthorized(err.to_string()) } }