diff --git a/src/client/commands/slurm.rs b/src/client/commands/slurm.rs index 64dcd8a1..a2600067 100644 --- a/src/client/commands/slurm.rs +++ b/src/client/commands/slurm.rs @@ -3076,11 +3076,11 @@ fn fetch_sacct_for_workflow( let output_file = dir.join(format!("sacct_{}.json", slurm_job_id)); if let Err(e) = fs::write(&output_file, stdout.as_bytes()) { error!( - "Failed to write sacct output for job {}: {}", + "Failed to write sacct output for slurm_job_id={}: {}", slurm_job_id, e ); errors.push(format!( - "Job {}: Failed to write output: {}", + "slurm_job_id={}: Failed to write output: {}", slurm_job_id, e )); } else { @@ -3089,21 +3089,35 @@ fn fetch_sacct_for_workflow( } } Err(e) => { - error!("Failed to parse sacct JSON for job {}: {}", slurm_job_id, e); - errors - .push(format!("Job {}: Invalid JSON output: {}", slurm_job_id, e)); + error!( + "Failed to parse sacct JSON for slurm_job_id={}: {}", + slurm_job_id, e + ); + errors.push(format!( + "slurm_job_id={}: Invalid JSON output: {}", + slurm_job_id, e + )); } } } else { let stderr = String::from_utf8_lossy(&output.stderr); - error!("sacct command failed for job {}: {}", slurm_job_id, stderr); - errors.push(format!("Job {}: sacct failed: {}", slurm_job_id, stderr)); + error!( + "sacct command failed for slurm_job_id={}: {}", + slurm_job_id, stderr + ); + errors.push(format!( + "slurm_job_id={}: sacct failed: {}", + slurm_job_id, stderr + )); } } Err(e) => { - error!("Failed to run sacct for job {}: {}", slurm_job_id, e); + error!( + "Failed to run sacct for slurm_job_id={}: {}", + slurm_job_id, e + ); errors.push(format!( - "Job {}: Failed to execute sacct: {}", + "slurm_job_id={}: Failed to execute sacct: {}", slurm_job_id, e )); } diff --git a/src/client/hpc/hpc_manager.rs b/src/client/hpc/hpc_manager.rs index fb633bd5..8f59f8d9 100644 --- a/src/client/hpc/hpc_manager.rs +++ b/src/client/hpc/hpc_manager.rs @@ -64,7 +64,7 @@ impl HpcManager { /// The current status of the job pub fn get_status(&self, 
job_id: &str) -> Result { let info = self.interface.get_status(job_id)?; - trace!("Job {} status: {:?}", job_id, info.status); + trace!("hpc_job_id={} status={:?}", job_id, info.status); Ok(info.status) } diff --git a/src/client/job_runner.rs b/src/client/job_runner.rs index d0f6d4de..c9ac4776 100644 --- a/src/client/job_runner.rs +++ b/src/client/job_runner.rs @@ -1241,12 +1241,12 @@ impl JobRunner { // First pass: send termination signal to all running jobs for (job_id, async_job) in self.running_jobs.iter_mut() { info!( - "Job {} workflow_id={} job_id={}", + "Sending signal={} workflow_id={} job_id={}", termination_signal, self.workflow_id, job_id ); if let Err(e) = async_job.send_signal(termination_signal) { warn!( - "Job {} failed workflow_id={} job_id={} error={}", + "Failed to send signal={} workflow_id={} job_id={} error={}", termination_signal, self.workflow_id, job_id, e ); } @@ -1574,7 +1574,7 @@ impl JobRunner { // If any files are missing, return error if !missing_files.is_empty() { return Err(format!( - "Job {} completed successfully but expected output files are missing: {}", + "job_id={} completed successfully but expected output files are missing: {}", job_id, missing_files.join(", ") )); diff --git a/src/client/resource_correction.rs b/src/client/resource_correction.rs index 6c0d6618..b426040d 100644 --- a/src/client/resource_correction.rs +++ b/src/client/resource_correction.rs @@ -655,7 +655,7 @@ fn apply_upscale_for_adjustment( (adjustment.job_ids.first(), adjustment.job_names.first()) { info!( - "Job {} ({}): memory violation, increasing memory {} -> {} \ + "job_id={} name='{}': memory violation, increasing memory {} -> {} \ ({}x fallback for OOM without reliable peak data)", job_id, job_name, adjustment.current_memory, new_memory, opts.memory_multiplier ); @@ -675,7 +675,7 @@ fn apply_upscale_for_adjustment( (adjustment.job_ids.first(), adjustment.job_names.first()) { info!( - "Job {} ({}): memory violation, peak usage {} -> allocating {} 
({}x)", + "job_id={} name='{}': memory violation, peak usage {} -> allocating {} ({}x)", job_id, job_name, format_memory_bytes_short(max_peak), @@ -717,7 +717,7 @@ fn apply_upscale_for_adjustment( (adjustment.job_ids.first(), adjustment.job_names.first()) { info!( - "Job {} ({}): Timeout detected, increasing runtime {} -> {}", + "job_id={} name='{}': Timeout detected, increasing runtime {} -> {}", job_id, job_name, adjustment.current_runtime, new_runtime ); } @@ -755,7 +755,7 @@ fn apply_upscale_for_adjustment( (adjustment.job_ids.first(), adjustment.job_names.first()) { info!( - "Job {} ({}): Runtime violation detected, peak {}m -> allocating {} ({}x)", + "job_id={} name='{}': Runtime violation detected, peak {}m -> allocating {} ({}x)", job_id, job_name, max_peak_runtime, new_runtime, opts.runtime_multiplier ); } @@ -792,7 +792,7 @@ fn apply_upscale_for_adjustment( (adjustment.job_ids.first(), adjustment.job_names.first()) { info!( - "Job {} ({}): CPU over-utilization detected, peak {}% -> allocating {} CPUs ({:.1}x safety margin)", + "job_id={} name='{}': CPU over-utilization detected, peak {}% -> allocating {} CPUs ({:.1}x safety margin)", job_id, job_name, max_peak_cpu, new_cpus, opts.cpu_multiplier ); } diff --git a/src/client/resource_monitor.rs b/src/client/resource_monitor.rs index a13ef204..db97351f 100644 --- a/src/client/resource_monitor.rs +++ b/src/client/resource_monitor.rs @@ -946,7 +946,7 @@ fn run_monitoring_loop( && memory_bytes > limit { warn!( - "Job {} (PID {}) exceeded memory limit: {}MB > {}MB", + "job_id={} pid={} exceeded memory limit: {}MB > {}MB", job.job_id, pid, memory_bytes / (1024 * 1024), @@ -959,7 +959,10 @@ fn run_monitoring_loop( memory_bytes, limit_bytes: limit, }) { - error!("Failed to send OOM violation for job {}: {}", job.job_id, e); + error!( + "Failed to send OOM violation for job_id={}: {}", + job.job_id, e + ); } } @@ -968,7 +971,7 @@ fn run_monitoring_loop( } debug!( - "Job {} (PID {}): CPU={:.1}%, Mem={:.1}MB, 
Procs={}", + "job_id={} pid={}: CPU={:.1}%, Mem={:.1}MB, Procs={}", job.job_id, pid, cpu_percent, diff --git a/src/client/workflow_manager.rs b/src/client/workflow_manager.rs index d54bc9b9..f6a8e734 100644 --- a/src/client/workflow_manager.rs +++ b/src/client/workflow_manager.rs @@ -1020,7 +1020,7 @@ impl WorkflowManager { let job_status = match &job.status { Some(status) => status, None => { - warn!("Job {} has no status, skipping", job_id); + warn!("job_id={} has no status, skipping", job_id); continue; } }; @@ -1031,7 +1031,7 @@ impl WorkflowManager { if dry_run { // If dry run is true, just log the change info!( - "Dry run: Would reset job {} (name: '{}') from {:?} to Uninitialized due to file change in {} (id: {})", + "Dry run: Would reset job_id={} name='{}' from status={:?} to Uninitialized due to file change in '{}' file_id={}", job_id, &job.name, job_status, file.name, file_id ); @@ -1056,7 +1056,7 @@ impl WorkflowManager { }; info!( - "Dry run: Would reset downstream job {} (name: '{}' status: {:?}) to Uninitialized", + "Dry run: Would reset downstream job_id={} name='{}' status={:?} to Uninitialized", downstream_job_id, &downstream_job.name, downstream_job.status ); } @@ -1069,13 +1069,13 @@ impl WorkflowManager { ) { Ok(_) => { info!( - "Reset job {} (name: '{}') from {:?} to Uninitialized due to file change in {} (id: {})", + "Reset job_id={} name='{}' from status={:?} to Uninitialized due to file change in '{}' file_id={}", job_id, &job.name, job_status, file.name, file_id ); } Err(err) => { panic!( - "Failed to reset job {} status due to file change: {}", + "Failed to reset job_id={} status due to file change: {}", job_id, err ); } @@ -1085,7 +1085,7 @@ impl WorkflowManager { _ => { // Job is not Completed, Failed, or Canceled, no action needed debug!( - "Job {} (name: '{}') has status {:?}, no reset needed for file change in {} (id: {})", + "job_id={} name='{}' has status={:?}, no reset needed for file change in '{}' file_id={}", job_id, &job.name, 
job_status, file.name, file_id ); } diff --git a/src/mcp_server/tools.rs b/src/mcp_server/tools.rs index 750d813a..8a388fb4 100644 --- a/src/mcp_server/tools.rs +++ b/src/mcp_server/tools.rs @@ -1807,17 +1807,17 @@ pub fn regroup_job_resources( for (i, group) in groups.iter().enumerate() { if group.job_ids.is_empty() { - errors.push(format!("Group {} has no job_ids", i)); + errors.push(format!("Group index={} has no job_ids", i)); } for &job_id in &group.job_ids { if !job_map.contains_key(&job_id) { errors.push(format!( - "Job {} in group {} does not belong to workflow {}", + "job_id={} in group index={} does not belong to workflow_id={}", job_id, i, workflow_id )); } if !all_job_ids.insert(job_id) { - errors.push(format!("Job {} appears in multiple groups", job_id)); + errors.push(format!("job_id={} appears in multiple groups", job_id)); } } } diff --git a/src/server/api.rs b/src/server/api.rs index af146200..a07e6303 100644 --- a/src/server/api.rs +++ b/src/server/api.rs @@ -1,5 +1,6 @@ //! Common API module with shared imports and traits +use crate::models; use crate::server::transport_types::context_types::ApiError; use log::{debug, error, info}; use sqlx::sqlite::SqlitePool; @@ -126,6 +127,49 @@ pub async fn begin_immediate( pool.begin_with("BEGIN IMMEDIATE").await } +/// Parse a job status integer into a [`models::JobStatus`]. +/// +/// Database row reads return the encoded integer value; if the integer is out +/// of range (e.g. after a downgrade from a future schema), `JobStatus::from_int` +/// reports the failure. This helper centralises the log line and the +/// `ApiError` conversion so handlers can write +/// `let status = parse_job_status(status_int, job_id)?;` instead of a 7-line +/// match. The `job_id` is included in the log line so the offending row can +/// be located. 
+pub fn parse_job_status(status_int: i32, job_id: i64) -> Result<models::JobStatus, ApiError> { + models::JobStatus::from_int(status_int).map_err(|e| { + error!( + "Failed to parse job status job_id={} status={} error={}", + job_id, status_int, e + ); + ApiError(format!( + "Failed to parse job status for job_id={}: {}", + job_id, e + )) + }) +} + +/// Build an `ErrorResponse` whose body is `{"message": <message>}`. +/// +/// API handlers historically construct this shape inline with +/// `models::ErrorResponse::new(serde_json::json!({"message": ...}))`. Use this +/// helper to keep call sites focused on intent. +pub fn message_error_response(message: impl Into<String>) -> models::ErrorResponse { + models::ErrorResponse::new(serde_json::json!({"message": message.into()})) +} + +/// Build a `"<resource> not found with ID: <id>"` error response. +/// +/// `resource` is the human-readable resource name (e.g., `"Workflow"`, `"Job"`). +/// The wording matches what handlers already produce, so error bodies remain +/// stable for clients. +pub fn resource_not_found_response( + resource: &str, + id: impl std::fmt::Display, +) -> models::ErrorResponse { + message_error_response(format!("{} not found with ID: {}", resource, id)) +} + /// Escape SQL LIKE wildcard characters in user input. /// Escapes `%`, `_`, and `\` with a backslash prefix. pub fn escape_like_pattern(input: &str) -> String { @@ -221,6 +265,86 @@ mod tests { } } +/// Inside an async fn that holds an open `sqlx::Transaction`, evaluate +/// `$expr` (a `Result<_, _>`) and short-circuit on `Err` by rolling the +/// transaction back and returning the error from the enclosing function. +/// +/// This collapses the pervasive +/// ```ignore +/// let v = match $expr { +/// Ok(v) => v, +/// Err(e) => { +/// let _ = $tx.rollback().await; +/// return Err(e); +/// } +/// }; +/// ``` +/// pattern into a single line.
Use it where the open transaction would +/// otherwise be left dangling on the error path; without rollback the +/// connection is stuck in `BEGIN IMMEDIATE` until the runtime drops it. +#[macro_export] +macro_rules! tx_try { + ($tx:expr, $expr:expr $(,)?) => { + match $expr { + Ok(v) => v, + Err(e) => { + let _ = $tx.rollback().await; + return Err(e); + } + } + }; +} + +/// Build a paginated list response model with the canonical +/// `(items, offset, max_limit, count, total_count, has_more)` shape. +/// +/// Every `ListXxxResponse` model shares this field layout, so most list +/// endpoints recompute `count = items.len() as i64` and +/// `has_more = offset + count < total_count` by hand. Use this macro to +/// collapse that boilerplate into a single struct-literal expression. +/// +/// `max_limit` defaults to [`MAX_RECORD_TRANSFER_COUNT`]. Pass +/// `max_limit = ` to override (e.g. when the response should echo back +/// the caller's clamped page size rather than the global cap). +/// +/// # Examples +/// +/// ```ignore +/// use crate::paginated_list_response; +/// let response = paginated_list_response!( +/// models::ListJobsResponse, +/// items, +/// offset, +/// total_count +/// ); +/// ``` +#[macro_export] +macro_rules! paginated_list_response { + ($model:path, $items:expr, $offset:expr, $total_count:expr $(,)?) => { + $crate::paginated_list_response!( + $model, + $items, + $offset, + $total_count, + max_limit = $crate::MAX_RECORD_TRANSFER_COUNT + ) + }; + ($model:path, $items:expr, $offset:expr, $total_count:expr, max_limit = $max_limit:expr $(,)?) 
=> {{ + let items = $items; + let offset: i64 = $offset; + let total_count: i64 = $total_count; + let count = items.len() as i64; + $model { + items, + offset, + max_limit: $max_limit, + count, + total_count, + has_more: offset + count < total_count, + } + }}; +} + /// Common pagination response structure #[derive(Debug)] pub struct PaginationInfo { diff --git a/src/server/api/compute_nodes.rs b/src/server/api/compute_nodes.rs index 1d98584d..63b3ed2e 100644 --- a/src/server/api/compute_nodes.rs +++ b/src/server/api/compute_nodes.rs @@ -14,7 +14,7 @@ use crate::server::api_responses::{ use crate::models; -use super::{ApiContext, MAX_RECORD_TRANSFER_COUNT, SqlQueryBuilder, database_error_with_msg}; +use super::{ApiContext, SqlQueryBuilder, database_error_with_msg, resource_not_found_response}; /// Trait defining compute node-related API operations #[async_trait] @@ -252,11 +252,8 @@ where { Ok(Some(record)) => record, Ok(None) => { - let error_response = models::ErrorResponse::new(serde_json::json!({ - "message": format!("Compute node not found with ID: {}", id) - })); return Ok(GetComputeNodeResponse::NotFoundErrorResponse( - error_response, + resource_not_found_response("Compute node", id), )); } Err(e) => { @@ -369,9 +366,7 @@ where where_conditions.push("json_extract(scheduler, '$.scheduler_id') = ?".to_string()); } let where_clause = where_conditions.join(" AND "); - - // Validate sort_by against whitelist - let validated_sort_by = if let Some(ref col) = sort_by { + let sort_by = if let Some(ref col) = sort_by { if COMPUTE_NODE_COLUMNS.contains(&col.as_str()) { Some(col.clone()) } else { @@ -387,7 +382,7 @@ where .with_pagination_and_sorting( offset, limit, - validated_sort_by, + sort_by, reverse_sort, "id", COMPUTE_NODE_COLUMNS, @@ -472,28 +467,22 @@ where } }; - let current_count = items.len() as i64; - let offset_val = offset; - let has_more = offset_val + current_count < total_count; + let response = crate::paginated_list_response!( + 
models::ListComputeNodesResponse, + items, + offset, + total_count + ); debug!( "list_compute_nodes({}, {}/{}) - X-Span-ID: {:?}", workflow_id, - current_count, + response.count, total_count, context.get().0.clone() ); - Ok(ListComputeNodesResponse::SuccessfulResponse( - models::ListComputeNodesResponse { - items, - offset: offset_val, - max_limit: MAX_RECORD_TRANSFER_COUNT, - count: current_count, - total_count, - has_more, - }, - )) + Ok(ListComputeNodesResponse::SuccessfulResponse(response)) } /// Update a compute node. @@ -588,11 +577,8 @@ where }; if result.rows_affected() == 0 { - let error_response = models::ErrorResponse::new(serde_json::json!({ - "message": format!("Compute node not found with ID: {}", id) - })); return Ok(UpdateComputeNodeResponse::NotFoundErrorResponse( - error_response, + resource_not_found_response("Compute node", id), )); } diff --git a/src/server/api/events.rs b/src/server/api/events.rs index 47555b44..3cd3a56e 100644 --- a/src/server/api/events.rs +++ b/src/server/api/events.rs @@ -16,8 +16,8 @@ use crate::server::api_responses::{ use crate::models; use super::{ - ApiContext, MAX_RECORD_TRANSFER_COUNT, SqlQueryBuilder, database_error_with_msg, - json_parse_error, + ApiContext, SqlQueryBuilder, database_error_with_msg, json_parse_error, + resource_not_found_response, }; /// Trait defining event-related API operations @@ -146,10 +146,9 @@ where { Ok(Some(rec)) => rec, Ok(None) => { - let error_response = models::ErrorResponse::new(serde_json::json!({ - "message": format!("Event not found with ID: {}", id) - })); - return Ok(GetEventResponse::NotFoundErrorResponse(error_response)); + return Ok(GetEventResponse::NotFoundErrorResponse( + resource_not_found_response("Event", id), + )); } Err(e) => { return Err(database_error_with_msg(e, "Failed to fetch event")); @@ -214,9 +213,7 @@ where let _category = category; // Acknowledge the parameter to avoid unused warnings let where_clause = where_conditions.join(" AND "); - - // Validate 
sort_by against whitelist - let validated_sort_by = if let Some(ref col) = sort_by { + let sort_by = if let Some(ref col) = sort_by { if EVENT_COLUMNS.contains(&col.as_str()) { Some(col.clone()) } else { @@ -230,14 +227,7 @@ where // Build the complete query with pagination and sorting let query = SqlQueryBuilder::new(base_query) .with_where(where_clause.clone()) - .with_pagination_and_sorting( - offset, - limit, - validated_sort_by, - reverse_sort, - "id", - EVENT_COLUMNS, - ) + .with_pagination_and_sorting(offset, limit, sort_by, reverse_sort, "id", EVENT_COLUMNS) .build(); debug!("Executing query: {}", query); @@ -298,28 +288,18 @@ where } }; - let current_count = items.len() as i64; - let offset_val = offset; - let has_more = offset_val + current_count < total_count; + let response = + crate::paginated_list_response!(models::ListEventsResponse, items, offset, total_count); debug!( "list_events({}, {}/{}) - X-Span-ID: {:?}", workflow_id, - current_count, + response.count, total_count, context.get().0.clone() ); - Ok(ListEventsResponse::SuccessfulResponse( - models::ListEventsResponse { - items, - offset: offset_val, - max_limit: MAX_RECORD_TRANSFER_COUNT, - count: current_count, - total_count, - has_more, - }, - )) + Ok(ListEventsResponse::SuccessfulResponse(response)) } /// Update an event. 
@@ -371,10 +351,9 @@ where }; if result.rows_affected() == 0 { - let error_response = models::ErrorResponse::new(serde_json::json!({ - "message": format!("Event not found with ID: {}", id) - })); - return Ok(UpdateEventResponse::NotFoundErrorResponse(error_response)); + return Ok(UpdateEventResponse::NotFoundErrorResponse( + resource_not_found_response("Event", id), + )); } // Return the updated event by fetching it again diff --git a/src/server/api/failure_handlers.rs b/src/server/api/failure_handlers.rs index 3fb38b9e..8339ede0 100644 --- a/src/server/api/failure_handlers.rs +++ b/src/server/api/failure_handlers.rs @@ -13,7 +13,9 @@ use crate::server::api_responses::{ use crate::models; -use super::{ApiContext, MAX_RECORD_TRANSFER_COUNT, database_error_with_msg}; +use super::{ + ApiContext, MAX_RECORD_TRANSFER_COUNT, database_error_with_msg, resource_not_found_response, +}; /// Trait defining failure handler-related API operations #[async_trait] @@ -131,11 +133,8 @@ where { Ok(Some(record)) => record, Ok(None) => { - let error_response = models::ErrorResponse::new(serde_json::json!({ - "message": format!("Failure handler not found with ID: {}", id) - })); return Ok(GetFailureHandlerResponse::NotFoundErrorResponse( - error_response, + resource_not_found_response("Failure handler", id), )); } Err(e) => { @@ -210,8 +209,6 @@ where }) .collect(); - let count = items.len() as i64; - // Get total count let total_count = match sqlx::query!( r#"SELECT COUNT(*) as total FROM failure_handler WHERE workflow_id = $1"#, @@ -229,17 +226,13 @@ where } }; - let has_more = offset + count < total_count; - Ok(ListFailureHandlersResponse::SuccessfulResponse( - models::ListFailureHandlersResponse { + crate::paginated_list_response!( + models::ListFailureHandlersResponse, items, offset, - max_limit: MAX_RECORD_TRANSFER_COUNT, - count, - total_count, - has_more, - }, + total_count + ), )) } @@ -269,11 +262,8 @@ where }; if result.rows_affected() == 0 { - let error_response = 
models::ErrorResponse::new(serde_json::json!({ - "message": format!("Failure handler not found with ID: {}", id) - })); return Ok(DeleteFailureHandlerResponse::NotFoundErrorResponse( - error_response, + resource_not_found_response("Failure handler", id), )); } diff --git a/src/server/api/files.rs b/src/server/api/files.rs index b4bfe633..7429ce4b 100644 --- a/src/server/api/files.rs +++ b/src/server/api/files.rs @@ -14,7 +14,7 @@ use crate::server::api_responses::{ use crate::models; -use super::{ApiContext, MAX_RECORD_TRANSFER_COUNT, SqlQueryBuilder, database_error_with_msg}; +use super::{ApiContext, SqlQueryBuilder, database_error_with_msg, resource_not_found_response}; /// Trait defining file-related API operations #[async_trait] @@ -182,10 +182,9 @@ where { Ok(Some(rec)) => rec, Ok(None) => { - let error_response = models::ErrorResponse::new(serde_json::json!({ - "message": format!("File not found with ID: {}", id) - })); - return Ok(GetFileResponse::NotFoundErrorResponse(error_response)); + return Ok(GetFileResponse::NotFoundErrorResponse( + resource_not_found_response("File", id), + )); } Err(e) => { return Err(database_error_with_msg(e, "Failed to fetch file")); @@ -281,9 +280,7 @@ where } let where_clause = where_conditions.join(" AND "); - - // Validate sort_by against whitelist - let validated_sort_by = if let Some(ref col) = sort_by { + let sort_by = if let Some(ref col) = sort_by { if FILE_COLUMNS.contains(&col.as_str()) { // If we have a join (needs_join is true), prefix with "f." 
if it's a file column if needs_join { @@ -307,7 +304,7 @@ where .with_pagination_and_sorting( offset, limit, - validated_sort_by, + sort_by, reverse_sort, sort_column, FILE_COLUMNS, @@ -383,28 +380,18 @@ where } }; - let current_count = items.len() as i64; - let offset_val = offset; - let has_more = offset_val + current_count < total_count; + let response = + crate::paginated_list_response!(models::ListFilesResponse, items, offset, total_count); debug!( "list_files({}, {}/{}) - X-Span-ID: {:?}", workflow_id, - current_count, + response.count, total_count, context.get().0.clone() ); - Ok(ListFilesResponse::SuccessfulResponse( - models::ListFilesResponse { - items, - offset: offset_val, - max_limit: MAX_RECORD_TRANSFER_COUNT, - count: current_count, - total_count, - has_more, - }, - )) + Ok(ListFilesResponse::SuccessfulResponse(response)) } /// Return files that are marked as required to exist but don't exist. @@ -501,10 +488,9 @@ where }; if result.rows_affected() == 0 { - let error_response = models::ErrorResponse::new(serde_json::json!({ - "message": format!("File not found with ID: {}", id) - })); - return Ok(UpdateFileResponse::NotFoundErrorResponse(error_response)); + return Ok(UpdateFileResponse::NotFoundErrorResponse( + resource_not_found_response("File", id), + )); } // Return the updated file by fetching it again diff --git a/src/server/api/jobs.rs b/src/server/api/jobs.rs index d9adc7fd..28800c7d 100644 --- a/src/server/api/jobs.rs +++ b/src/server/api/jobs.rs @@ -23,8 +23,8 @@ use crate::models::{self as models, JobStatus}; use super::{ ApiContext, MAX_RECORD_TRANSFER_COUNT, SqlQueryBuilder, begin_immediate, - database_error_with_msg, deserialize_env_map, normalize_env_map, serialize_env_map, - validate_env_map, + database_error_with_msg, deserialize_env_map, message_error_response, normalize_env_map, + parse_job_status, resource_not_found_response, serialize_env_map, validate_env_map, }; /// Trait defining job-related API operations @@ -169,6 +169,28 @@ 
const JOB_COLUMNS: &[&str] = &[ "attempt_id", ]; +/// Returns true if `body_field` is `Some` and its contents differ from `existing_field` +/// when both are compared as **sets** (order-independent). +/// +/// `update_job` treats relationship-id vectors (`input_file_ids`, `depends_on_job_ids`, +/// etc.) as sets: a request that sends the same IDs in a different order is not a +/// modification. This helper centralises the sort-then-compare so the call sites can +/// stay one-liners. +fn vec_id_set_changed( + body_field: &Option<Vec<i64>>, + existing_field: &Option<Vec<i64>>, +) -> bool { + let Some(body_vec) = body_field else { + return false; + }; + let mut body_sorted = body_vec.clone(); + body_sorted.sort(); + let empty: Vec<i64> = Vec::new(); + let mut existing_sorted = existing_field.as_ref().unwrap_or(&empty).clone(); + existing_sorted.sort(); + body_sorted != existing_sorted +} + impl JobsApiImpl { pub fn new(context: ApiContext) -> Self { Self { context } @@ -326,16 +348,7 @@ impl JobsApiImpl { }; let status_int: i32 = record.get("status"); - let status = match JobStatus::from_int(status_int) { - Ok(s) => s, - Err(e) => { - error!( - "Failed to parse job status '{}' for job {}: {}", - status_int, id, e - ); - return Err(ApiError(format!("Failed to parse job status: {}", e))); - } - }; + let status = parse_job_status(status_int, id)?; // Get depends_on relationships let depends_on_records = match sqlx::query!( @@ -530,16 +543,9 @@ impl JobsApiImpl { let job_id = job_row.id; let current_status_int = job_row.status as i32; - // Parse the current status - let current_status = match JobStatus::from_int(current_status_int) { - Ok(status) => status, - Err(e) => { - error!( - "Failed to parse current job status '{}': {}", - current_status_int, e - ); - continue; - } + // Parse the current status; skip jobs whose status integer cannot be decoded.
+ let Ok(current_status) = parse_job_status(current_status_int, job_id) else { + continue; }; // Reset the job status @@ -1176,19 +1182,16 @@ where let supports_termination = job.supports_termination.unwrap_or(false); let priority = job.priority.unwrap_or(0); if priority < 0 { - let error_response = models::ErrorResponse::new(serde_json::json!({ - "message": format!("priority must be >= 0, got {} for job '{}'", priority, job.name) - })); return Ok(CreateJobResponse::UnprocessableContentErrorResponse( - error_response, + message_error_response(format!( + "priority must be >= 0, got {} for job '{}'", + priority, job.name + )), )); } if let Err(err) = validate_env_map(job.env.as_ref(), "job env") { - let error_response = models::ErrorResponse::new(serde_json::json!({ - "message": err.0 - })); return Ok(CreateJobResponse::UnprocessableContentErrorResponse( - error_response, + message_error_response(err.0), )); } let status = JobStatus::Uninitialized; @@ -1205,21 +1208,12 @@ where } }; - let workflow_env = match Self::fetch_workflow_env(&mut *tx, job.workflow_id).await { - Ok(env) => env, - Err(e) => { - let _ = tx.rollback().await; - return Err(e); - } - }; + let workflow_env = crate::tx_try!( + tx, + Self::fetch_workflow_env(&mut *tx, job.workflow_id).await + ); job.env = Self::merge_env(workflow_env.as_ref(), job.env.as_ref()); - let job_env = match serialize_env_map(job.env.clone(), "job env") { - Ok(env) => env, - Err(e) => { - let _ = tx.rollback().await; - return Err(e); - } - }; + let job_env = crate::tx_try!(tx, serialize_env_map(job.env.clone(), "job env")); let job_result = match sqlx::query( r#" @@ -1270,21 +1264,25 @@ where // Handle job dependencies if let Some(depends_on_job_ids) = &job.depends_on_job_ids { for blocking_id in depends_on_job_ids { - if let Err(e) = self - .add_depends_on_association(&mut *tx, job_id, *blocking_id, job.workflow_id) + crate::tx_try!( + tx, + self.add_depends_on_association( + &mut *tx, + job_id, + *blocking_id, + 
job.workflow_id + ) .await - { - let _ = tx.rollback().await; - return Err(e); - } + ); } } // Handle input files if let Some(input_file_ids) = &job.input_file_ids { for file_id in input_file_ids { - if let Err(e) = self - .add_job_file_association( + crate::tx_try!( + tx, + self.add_job_file_association( &mut *tx, job_id, *file_id, @@ -1292,18 +1290,16 @@ where "job_input_file", ) .await - { - let _ = tx.rollback().await; - return Err(e); - } + ); } } // Handle output files if let Some(output_file_ids) = &job.output_file_ids { for file_id in output_file_ids { - if let Err(e) = self - .add_job_file_association( + crate::tx_try!( + tx, + self.add_job_file_association( &mut *tx, job_id, *file_id, @@ -1311,46 +1307,39 @@ where "job_output_file", ) .await - { - let _ = tx.rollback().await; - return Err(e); - } + ); } } // Handle input user_data if let Some(input_user_data_ids) = &job.input_user_data_ids { for user_data_id in input_user_data_ids { - if let Err(e) = self - .add_job_user_data_association( + crate::tx_try!( + tx, + self.add_job_user_data_association( &mut *tx, job_id, *user_data_id, "job_input_user_data", ) .await - { - let _ = tx.rollback().await; - return Err(e); - } + ); } } // Handle output user_data if let Some(output_user_data_ids) = &job.output_user_data_ids { for user_data_id in output_user_data_ids { - if let Err(e) = self - .add_job_user_data_association( + crate::tx_try!( + tx, + self.add_job_user_data_association( &mut *tx, job_id, *user_data_id, "job_output_user_data", ) .await - { - let _ = tx.rollback().await; - return Err(e); - } + ); } } @@ -1416,11 +1405,8 @@ where for mut job in body.jobs { if let Err(err) = validate_env_map(job.env.as_ref(), "job env") { let _ = transaction.rollback().await; - let error_response = models::ErrorResponse::new(serde_json::json!({ - "message": err.0 - })); return Ok(CreateJobsResponse::UnprocessableContentErrorResponse( - error_response, + message_error_response(err.0), )); } let workflow_env = match 
workflow_env_cache.get(&job.workflow_id) { @@ -1452,11 +1438,11 @@ where let priority = job.priority.unwrap_or(0); if priority < 0 { let _ = transaction.rollback().await; - let error_response = models::ErrorResponse::new(serde_json::json!({ - "message": format!("priority must be >= 0, got {} for job '{}'", priority, job.name) - })); return Ok(CreateJobsResponse::UnprocessableContentErrorResponse( - error_response, + message_error_response(format!( + "priority must be >= 0, got {} for job '{}'", + priority, job.name + )), )); } let status = JobStatus::Uninitialized; @@ -1692,12 +1678,9 @@ where debug!("get_job({}) - X-Span-ID: {:?}", id, context.get().0.clone()); match self.get_job_with_relationships(id).await { Ok(job) => Ok(GetJobResponse::SuccessfulResponse(job)), - Err(ApiError(msg)) if msg.contains("not found") => { - let error_response = models::ErrorResponse::new(serde_json::json!({ - "message": format!("Job not found with ID: {}", id) - })); - Ok(GetJobResponse::NotFoundErrorResponse(error_response)) - } + Err(ApiError(msg)) if msg.contains("not found") => Ok( + GetJobResponse::NotFoundErrorResponse(resource_not_found_response("Job", id)), + ), Err(e) => Err(e), } } @@ -1820,9 +1803,7 @@ where } let where_clause = where_conditions.join(" AND "); - - // Validate sort_by against whitelist - let validated_sort_by = if let Some(ref col) = sort_by { + let sort_by = if let Some(ref col) = sort_by { if JOB_COLUMNS.contains(&col.as_str()) { Some(col.clone()) } else { @@ -1836,14 +1817,7 @@ where // Build the complete query with pagination and sorting let query = SqlQueryBuilder::new(base_query) .with_where(where_clause.clone()) - .with_pagination_and_sorting( - offset, - limit, - validated_sort_by, - reverse_sort, - "id", - JOB_COLUMNS, - ) + .with_pagination_and_sorting(offset, limit, sort_by, reverse_sort, "id", JOB_COLUMNS) .build(); debug!("Executing query: {}", query); @@ -1893,16 +1867,7 @@ where } else { // Create job model without relationships for better 
performance let status_int: i32 = record.get("status"); - let status = match JobStatus::from_int(status_int) { - Ok(s) => s, - Err(e) => { - error!( - "Failed to parse job status '{}' for job {}: {}", - status_int, job_id, e - ); - return Err(ApiError(format!("Failed to parse job status: {}", e))); - } - }; + let status = parse_job_status(status_int, job_id)?; items.push(models::JobModel { id: Some(record.get("id")), @@ -1963,28 +1928,18 @@ where } }; - let current_count = items.len() as i64; - let offset_val = offset; - let has_more = offset_val + current_count < total_count; + let response = + crate::paginated_list_response!(models::ListJobsResponse, items, offset, total_count); debug!( "list_jobs({}, {}/{}) - X-Span-ID: {:?}", workflow_id, - current_count, + response.count, total_count, context.get().0.clone() ); - Ok(ListJobsResponse::SuccessfulResponse( - models::ListJobsResponse { - items, - offset: offset_val, - max_limit: MAX_RECORD_TRANSFER_COUNT, - count: current_count, - total_count, - has_more, - }, - )) + Ok(ListJobsResponse::SuccessfulResponse(response)) } /// Update a job. 
@@ -2016,10 +1971,9 @@ where let existing_job = match self.get_job_with_relationships(id).await { Ok(job) => job, Err(ApiError(msg)) if msg.contains("not found") => { - let error_response = models::ErrorResponse::new(serde_json::json!({ - "message": format!("Job not found with ID: {}", id) - })); - return Ok(UpdateJobResponse::NotFoundErrorResponse(error_response)); + return Ok(UpdateJobResponse::NotFoundErrorResponse( + resource_not_found_response("Job", id), + )); } Err(e) => return Err(e), }; @@ -2028,11 +1982,8 @@ where let existing_status = match existing_job.status { Some(status) => status, None => { - let error_response = models::ErrorResponse::new(serde_json::json!({ - "message": "Cannot update job - job has no status set" - })); return Ok(UpdateJobResponse::UnprocessableContentErrorResponse( - error_response, + message_error_response("Cannot update job - job has no status set"), )); } }; @@ -2041,23 +1992,26 @@ where // (scheduler_id and resource_requirements_id can be updated regardless of status) // All fields are checked by comparing to existing values, since the client may // send the full job object with only scheduler_id/resource_requirements_id changed - // Note: If body field is None, we treat it as "not changing" that field + // Note: If body field is None, we treat it as "not changing" that field. + // Vector relationship fields are compared as sets (order-independent) so a + // re-send of the same IDs in a different order is not flagged as a change. 
let name_changed = body.name != existing_job.name; let command_changed = body.command != existing_job.command; let body_env = normalize_env_map(body.env.clone()); let env_changed = body.env.is_some() && body_env != existing_job.env; - // Treat None as "not changing" - only compare if body.status is Some let status_changed = body.status.is_some() && body.status != existing_job.status; let input_file_ids_changed = - body.input_file_ids.is_some() && body.input_file_ids != existing_job.input_file_ids; + vec_id_set_changed(&body.input_file_ids, &existing_job.input_file_ids); let output_file_ids_changed = - body.output_file_ids.is_some() && body.output_file_ids != existing_job.output_file_ids; - let input_user_data_ids_changed = body.input_user_data_ids.is_some() - && body.input_user_data_ids != existing_job.input_user_data_ids; - let output_user_data_ids_changed = body.output_user_data_ids.is_some() - && body.output_user_data_ids != existing_job.output_user_data_ids; - let depends_on_job_ids_changed = body.depends_on_job_ids.is_some() - && body.depends_on_job_ids != existing_job.depends_on_job_ids; + vec_id_set_changed(&body.output_file_ids, &existing_job.output_file_ids); + let input_user_data_ids_changed = + vec_id_set_changed(&body.input_user_data_ids, &existing_job.input_user_data_ids); + let output_user_data_ids_changed = vec_id_set_changed( + &body.output_user_data_ids, + &existing_job.output_user_data_ids, + ); + let depends_on_job_ids_changed = + vec_id_set_changed(&body.depends_on_job_ids, &existing_job.depends_on_job_ids); let has_restricted_updates = name_changed || command_changed @@ -2106,18 +2060,15 @@ where changed_fields.push("depends_on_job_ids".to_string()); } - let error_response = models::ErrorResponse::new(serde_json::json!({ - "message": format!( + return Ok(UpdateJobResponse::UnprocessableContentErrorResponse( + message_error_response(format!( "Cannot update job {} when status is '{}' - most updates are only allowed when status is 'uninitialized'. 
\ Only scheduler_id and resource_requirements_id can be updated at any time. \ Changed fields: [{}]", id, existing_status, changed_fields.join(", ") - ) - })); - return Ok(UpdateJobResponse::UnprocessableContentErrorResponse( - error_response, + )), )); } @@ -2127,113 +2078,51 @@ where && *new_status != *existing_status && *new_status != models::JobStatus::Disabled { - let error_response = models::ErrorResponse::new(serde_json::json!({ - "message": "Cannot update job status - this field is immutable after job creation (except to Disabled)" - })); return Ok(UpdateJobResponse::UnprocessableContentErrorResponse( - error_response, + message_error_response( + "Cannot update job status - this field is immutable after job creation (except to Disabled)", + ), )); } - // Check if depends_on_job_ids is being modified - let mut depends_on_job_ids_modified = false; - if let Some(depends_on_ids) = &body.depends_on_job_ids { - let empty_vec = vec![]; - let existing_depends_on = existing_job - .depends_on_job_ids - .as_ref() - .unwrap_or(&empty_vec); - let mut body_sorted = depends_on_ids.clone(); - let mut existing_sorted = existing_depends_on.clone(); - body_sorted.sort(); - existing_sorted.sort(); - if body_sorted != existing_sorted { - depends_on_job_ids_modified = true; - } - } - - // Validate other immutable fields - return error if they are set in body but don't match current job - - if let Some(input_file_ids) = &body.input_file_ids { - let empty_vec = vec![]; - let existing_input_files = existing_job.input_file_ids.as_ref().unwrap_or(&empty_vec); - let mut body_sorted = input_file_ids.clone(); - let mut existing_sorted = existing_input_files.clone(); - body_sorted.sort(); - existing_sorted.sort(); - if body_sorted != existing_sorted { - let error_response = models::ErrorResponse::new(serde_json::json!({ - "message": "Cannot modify input_file_ids - this field is immutable after job creation" - })); - return Ok(UpdateJobResponse::UnprocessableContentErrorResponse( - 
error_response, - )); - } + // Validate immutable fields. `input_file_ids`, `output_file_ids`, + // `input_user_data_ids`, `output_user_data_ids`, and `env` are immutable after + // job creation (`depends_on_job_ids` is mutable and handled separately below). + // The *_changed flags above already use set-comparison semantics, so a single + // check per field is enough. + if input_file_ids_changed { + return Ok(UpdateJobResponse::UnprocessableContentErrorResponse( + message_error_response( + "Cannot modify input_file_ids - this field is immutable after job creation", + ), + )); } - - if let Some(output_file_ids) = &body.output_file_ids { - let empty_vec = vec![]; - let existing_output_files = existing_job.output_file_ids.as_ref().unwrap_or(&empty_vec); - let mut body_sorted = output_file_ids.clone(); - let mut existing_sorted = existing_output_files.clone(); - body_sorted.sort(); - existing_sorted.sort(); - if body_sorted != existing_sorted { - let error_response = models::ErrorResponse::new(serde_json::json!({ - "message": "Cannot modify output_file_ids - this field is immutable after job creation" - })); - return Ok(UpdateJobResponse::UnprocessableContentErrorResponse( - error_response, - )); - } + if output_file_ids_changed { + return Ok(UpdateJobResponse::UnprocessableContentErrorResponse( + message_error_response( + "Cannot modify output_file_ids - this field is immutable after job creation", + ), + )); } - - if let Some(input_user_data_ids) = &body.input_user_data_ids { - let empty_vec = vec![]; - let existing_input_user_data = existing_job - .input_user_data_ids - .as_ref() - .unwrap_or(&empty_vec); - let mut body_sorted = input_user_data_ids.clone(); - let mut existing_sorted = existing_input_user_data.clone(); - body_sorted.sort(); - existing_sorted.sort(); - if body_sorted != existing_sorted { - let error_response = models::ErrorResponse::new(serde_json::json!({ - "message": "Cannot modify input_user_data_ids - this field is immutable after job creation" - 
})); - return Ok(UpdateJobResponse::UnprocessableContentErrorResponse( - error_response, - )); - } + if input_user_data_ids_changed { + return Ok(UpdateJobResponse::UnprocessableContentErrorResponse( + message_error_response( + "Cannot modify input_user_data_ids - this field is immutable after job creation", + ), + )); } - - if let Some(output_user_data_ids) = &body.output_user_data_ids { - let empty_vec = vec![]; - let existing_output_user_data = existing_job - .output_user_data_ids - .as_ref() - .unwrap_or(&empty_vec); - let mut body_sorted = output_user_data_ids.clone(); - let mut existing_sorted = existing_output_user_data.clone(); - body_sorted.sort(); - existing_sorted.sort(); - if body_sorted != existing_sorted { - let error_response = models::ErrorResponse::new(serde_json::json!({ - "message": "Cannot modify output_user_data_ids - this field is immutable after job creation" - })); - return Ok(UpdateJobResponse::UnprocessableContentErrorResponse( - error_response, - )); - } + if output_user_data_ids_changed { + return Ok(UpdateJobResponse::UnprocessableContentErrorResponse( + message_error_response( + "Cannot modify output_user_data_ids - this field is immutable after job creation", + ), + )); } - if env_changed { - let error_response = models::ErrorResponse::new(serde_json::json!({ - "message": "Cannot modify env - this field is immutable after job creation" - })); return Ok(UpdateJobResponse::UnprocessableContentErrorResponse( - error_response, + message_error_response( + "Cannot modify env - this field is immutable after job creation", + ), )); } @@ -2243,11 +2132,8 @@ where if let Some(p) = body.priority && p < 0 { - let error_response = models::ErrorResponse::new(serde_json::json!({ - "message": format!("priority must be >= 0, got {}", p) - })); return Ok(UpdateJobResponse::UnprocessableContentErrorResponse( - error_response, + message_error_response(format!("priority must be >= 0, got {}", p)), )); } @@ -2287,14 +2173,13 @@ where }; if 
result.rows_affected() == 0 { - let error_response = models::ErrorResponse::new(serde_json::json!({ - "message": format!("Job not found with ID: {}", id) - })); - return Ok(UpdateJobResponse::NotFoundErrorResponse(error_response)); + return Ok(UpdateJobResponse::NotFoundErrorResponse( + resource_not_found_response("Job", id), + )); } // If depends_on_job_ids was modified, update the relationships - if depends_on_job_ids_modified { + if depends_on_job_ids_changed { // Start a transaction for relationship updates let mut tx = match self.context.pool.begin().await { Ok(tx) => tx, @@ -2302,32 +2187,32 @@ where }; // Delete existing depends_on relationships for this job - if let Err(e) = sqlx::query!("DELETE FROM job_depends_on WHERE job_id = $1", id) - .execute(&mut *tx) - .await - { - let _ = tx.rollback().await; - return Err(database_error_with_msg( - e, - "Failed to delete old job dependencies", - )); - } + crate::tx_try!( + tx, + sqlx::query!("DELETE FROM job_depends_on WHERE job_id = $1", id) + .execute(&mut *tx) + .await + .map_err(|e| database_error_with_msg( + e, + "Failed to delete old job dependencies" + )) + ); // Add new depends_on relationships if provided if let Some(depends_on_ids) = &body.depends_on_job_ids { for blocking_id in depends_on_ids { - if let Err(e) = sqlx::query!( - "INSERT INTO job_depends_on (job_id, depends_on_job_id, workflow_id) VALUES ($1, $2, $3)", - id, - *blocking_id, - existing_job.workflow_id - ) - .execute(&mut *tx) - .await - { - let _ = tx.rollback().await; - return Err(database_error_with_msg(e, "Failed to update job dependencies")); - } + crate::tx_try!( + tx, + sqlx::query!( + "INSERT INTO job_depends_on (job_id, depends_on_job_id, workflow_id) VALUES ($1, $2, $3)", + id, + *blocking_id, + existing_job.workflow_id + ) + .execute(&mut *tx) + .await + .map_err(|e| database_error_with_msg(e, "Failed to update job dependencies")) + ); } } @@ -2383,10 +2268,9 @@ where }; if result.rows_affected() == 0 { - let error_response = 
models::ErrorResponse::new(serde_json::json!({ - "message": format!("Job not found with ID: {}", id) - })); - return Ok(UpdateJobResponse::NotFoundErrorResponse(error_response)); + return Ok(UpdateJobResponse::NotFoundErrorResponse( + resource_not_found_response("Job", id), + )); } // Return the updated job by fetching it again with relationships @@ -2697,7 +2581,7 @@ where // Compare hashes if current_hash != stored_hash { debug!( - "Job {} ({}) input hash changed: stored={}, current={}", + "job_id={} name='{}' input hash changed: stored={}, current={}", job_id, job_name, stored_hash, current_hash ); jobs_to_reinitialize.push((job_id, job_name.clone())); @@ -2954,10 +2838,9 @@ where Ok(Some(record)) => record, Ok(None) => { let _ = sqlx::query("ROLLBACK").execute(&mut *conn).await; - let error_response = models::ErrorResponse::new(serde_json::json!({ - "message": format!("Job not found with ID: {}", id) - })); - return Ok(RetryJobResponse::NotFoundErrorResponse(error_response)); + return Ok(RetryJobResponse::NotFoundErrorResponse( + resource_not_found_response("Job", id), + )); } Err(e) => { let _ = sqlx::query("ROLLBACK").execute(&mut *conn).await; @@ -2986,14 +2869,11 @@ where // Verify run_id matches if workflow_run_id != run_id { let _ = sqlx::query("ROLLBACK").execute(&mut *conn).await; - let error_response = models::ErrorResponse::new(serde_json::json!({ - "message": format!( + return Ok(RetryJobResponse::UnprocessableContentErrorResponse( + message_error_response(format!( "Run ID mismatch: provided {} but workflow is at run {}", run_id, workflow_run_id - ) - })); - return Ok(RetryJobResponse::UnprocessableContentErrorResponse( - error_response, + )), )); } @@ -3001,12 +2881,11 @@ where // Note: Running is allowed because the job runner may call retry_job before complete_job // when handling failure recovery (the job has finished locally but the server hasn't been // notified yet). 
- let current_status = match JobStatus::from_int(status_int) { + let current_status = match parse_job_status(status_int, job_id) { Ok(s) => s, Err(e) => { let _ = sqlx::query("ROLLBACK").execute(&mut *conn).await; - error!("Failed to parse job status: {}", e); - return Err(ApiError(format!("Failed to parse job status: {}", e))); + return Err(e); } }; @@ -3015,28 +2894,22 @@ where && current_status != JobStatus::Terminated { let _ = sqlx::query("ROLLBACK").execute(&mut *conn).await; - let error_response = models::ErrorResponse::new(serde_json::json!({ - "message": format!( + return Ok(RetryJobResponse::UnprocessableContentErrorResponse( + message_error_response(format!( "Job cannot be retried: status is {:?}, must be Running, Failed, or Terminated", current_status - ) - })); - return Ok(RetryJobResponse::UnprocessableContentErrorResponse( - error_response, + )), )); } // Validate max_retries (server-side enforcement) if attempt_id >= max_retries as i64 { let _ = sqlx::query("ROLLBACK").execute(&mut *conn).await; - let error_response = models::ErrorResponse::new(serde_json::json!({ - "message": format!( - "Job cannot be retried: attempt_id {} >= max_retries {}", - attempt_id, max_retries - ) - })); return Ok(RetryJobResponse::UnprocessableContentErrorResponse( - error_response, + message_error_response(format!( + "Job cannot be retried: attempt_id={} >= max_retries={}", + attempt_id, max_retries + )), )); } diff --git a/src/server/api/remote_workers.rs b/src/server/api/remote_workers.rs index a28f77f2..b66ec9d3 100644 --- a/src/server/api/remote_workers.rs +++ b/src/server/api/remote_workers.rs @@ -10,7 +10,9 @@ use crate::server::api_responses::{ CreateRemoteWorkersResponse, DeleteRemoteWorkerResponse, ListRemoteWorkersResponse, }; -use super::{ApiContext, database_error_with_msg}; +use super::{ + ApiContext, database_error_with_msg, message_error_response, resource_not_found_response, +}; /// Trait defining remote worker API operations #[async_trait] @@ -77,11 
+79,8 @@ where { Ok(Some(_)) => {} Ok(None) => { - let error_response = models::ErrorResponse::new(serde_json::json!({ - "message": format!("Workflow not found with ID: {}", workflow_id) - })); return Ok(CreateRemoteWorkersResponse::NotFoundErrorResponse( - error_response, + resource_not_found_response("Workflow", workflow_id), )); } Err(e) => { @@ -147,11 +146,8 @@ where { Ok(Some(_)) => {} Ok(None) => { - let error_response = models::ErrorResponse::new(serde_json::json!({ - "message": format!("Workflow not found with ID: {}", workflow_id) - })); return Ok(ListRemoteWorkersResponse::NotFoundErrorResponse( - error_response, + resource_not_found_response("Workflow", workflow_id), )); } Err(e) => { @@ -215,11 +211,8 @@ where { Ok(Some(_)) => {} Ok(None) => { - let error_response = models::ErrorResponse::new(serde_json::json!({ - "message": format!("Workflow not found with ID: {}", workflow_id) - })); return Ok(DeleteRemoteWorkerResponse::NotFoundErrorResponse( - error_response, + resource_not_found_response("Workflow", workflow_id), )); } Err(e) => { @@ -241,11 +234,11 @@ where { Ok(result) => { if result.rows_affected() == 0 { - let error_response = models::ErrorResponse::new(serde_json::json!({ - "message": format!("Worker '{}' not found for workflow {}", worker, workflow_id) - })); return Ok(DeleteRemoteWorkerResponse::NotFoundErrorResponse( - error_response, + message_error_response(format!( + "Worker '{}' not found for workflow {}", + worker, workflow_id + )), )); } info!( diff --git a/src/server/api/resource_requirements.rs b/src/server/api/resource_requirements.rs index 04687f79..5f62fadc 100644 --- a/src/server/api/resource_requirements.rs +++ b/src/server/api/resource_requirements.rs @@ -17,7 +17,7 @@ use crate::memory_utils::memory_string_to_bytes; use crate::models; use crate::time_utils::duration_string_to_seconds; -use super::{ApiContext, MAX_RECORD_TRANSFER_COUNT, SqlQueryBuilder, database_error_with_msg}; +use super::{ApiContext, SqlQueryBuilder, 
database_error_with_msg, resource_not_found_response}; /// Trait defining resource requirements-related API operations #[async_trait] @@ -283,11 +283,8 @@ where { Ok(Some(rec)) => rec, Ok(None) => { - let error_response = models::ErrorResponse::new(serde_json::json!({ - "message": format!("Resource requirements not found with ID: {}", id) - })); return Ok(GetResourceRequirementsResponse::NotFoundErrorResponse( - error_response, + resource_not_found_response("Resource requirements", id), )); } Err(e) => { @@ -384,9 +381,7 @@ where } let where_clause = where_conditions.join(" AND "); - - // Validate sort_by against whitelist - let validated_sort_by = if let Some(ref col) = sort_by { + let sort_by = if let Some(ref col) = sort_by { if RESOURCE_REQUIREMENTS_COLUMNS.contains(&col.as_str()) { Some(col.clone()) } else { @@ -403,7 +398,7 @@ where .with_pagination_and_sorting( offset, limit, - validated_sort_by, + sort_by, reverse_sort, "id", RESOURCE_REQUIREMENTS_COLUMNS, @@ -518,27 +513,23 @@ where } }; - let current_count = items.len() as i64; - let offset_val = offset; - let has_more = offset_val + current_count < total_count; + let response = crate::paginated_list_response!( + models::ListResourceRequirementsResponse, + items, + offset, + total_count + ); debug!( "list_resource_requirements({}, {}/{}) - X-Span-ID: {:?}", workflow_id, - current_count, + response.count, total_count, context.get().0.clone() ); Ok(ListResourceRequirementsResponse::SuccessfulResponse( - models::ListResourceRequirementsResponse { - items, - offset: offset_val, - max_limit: MAX_RECORD_TRANSFER_COUNT, - count: current_count, - total_count, - has_more, - }, + response, )) } diff --git a/src/server/api/results.rs b/src/server/api/results.rs index 33b37d70..df3ea61c 100644 --- a/src/server/api/results.rs +++ b/src/server/api/results.rs @@ -14,7 +14,10 @@ use crate::server::api_responses::{ use crate::models; -use super::{ApiContext, MAX_RECORD_TRANSFER_COUNT, SqlQueryBuilder, 
database_error_with_msg}; +use super::{ + ApiContext, SqlQueryBuilder, database_error_with_msg, parse_job_status, + resource_not_found_response, +}; /// Trait defining result-related API operations #[async_trait] @@ -254,26 +257,16 @@ where { Ok(Some(record)) => record, Ok(None) => { - let error_response = models::ErrorResponse::new( - serde_json::json!({ - "message": format!("Result not found with ID: {}", id) - }) - ); - return Ok(GetResultResponse::NotFoundErrorResponse(error_response)); + return Ok(GetResultResponse::NotFoundErrorResponse( + resource_not_found_response("Result", id), + )); } Err(e) => { return Err(database_error_with_msg(e, "Failed to fetch result")); } }; - let status_int = record.status; - let status = match models::JobStatus::from_int(status_int as i32) { - Ok(s) => s, - Err(e) => { - error!("Failed to parse job status '{}': {}", status_int, e); - return Err(ApiError(format!("Failed to parse job status: {}", e))); - } - }; + let status = parse_job_status(record.status as i32, record.job_id)?; let result_model = models::ResultModel { id: Some(record.id), @@ -372,9 +365,7 @@ where } let where_clause = where_conditions.join(" AND "); - - // Validate sort_by against whitelist - let validated_sort_by = if let Some(ref col) = sort_by { + let sort_by = if let Some(ref col) = sort_by { if RESULT_COLUMNS.contains(&col.as_str()) { // If we have a join (show_all_results is false), prefix with "r." 
if it's a result column if !show_all_results { @@ -393,14 +384,7 @@ where // Build the complete query with pagination and sorting let query = SqlQueryBuilder::new(base_query) .with_where(where_clause.clone()) - .with_pagination_and_sorting( - offset, - limit, - validated_sort_by, - reverse_sort, - "id", - RESULT_COLUMNS, - ) + .with_pagination_and_sorting(offset, limit, sort_by, reverse_sort, "id", RESULT_COLUMNS) .build(); debug!("Executing query: {}", query); @@ -438,17 +422,12 @@ where let mut items: Vec = Vec::new(); for record in records { let status_int: i64 = record.get("status"); - let status = match models::JobStatus::from_int(status_int as i32) { - Ok(s) => s, - Err(e) => { - error!("Failed to parse job status '{}': {}", status_int, e); - return Err(ApiError(format!("Failed to parse job status: {}", e))); - } - }; + let job_id: i64 = record.get("job_id"); + let status = parse_job_status(status_int as i32, job_id)?; items.push(models::ResultModel { id: Some(record.get("id")), workflow_id: record.get("workflow_id"), - job_id: record.get("job_id"), + job_id, run_id: record.get("run_id"), attempt_id: Some(record.get("attempt_id")), compute_node_id: record.get("compute_node_id"), @@ -498,28 +477,22 @@ where } }; - let current_count = items.len() as i64; - let offset_val = offset; - let has_more = offset_val + current_count < total_count; + let response = crate::paginated_list_response!( + models::ListResultsResponse, + items, + offset, + total_count + ); debug!( "list_results({}, {}/{}) - X-Span-ID: {:?}", workflow_id, - current_count, + response.count, total_count, context.get().0.clone() ); - Ok(ListResultsResponse::SuccessfulResponse( - models::ListResultsResponse { - items, - offset: offset_val, - max_limit: MAX_RECORD_TRANSFER_COUNT, - count: current_count, - total_count, - has_more, - }, - )) + Ok(ListResultsResponse::SuccessfulResponse(response)) } /// Update a result. 
@@ -583,10 +556,9 @@ where }; if result.rows_affected() == 0 { - let error_response = models::ErrorResponse::new(serde_json::json!({ - "message": format!("Result not found with ID: {}", id) - })); - return Ok(UpdateResultResponse::NotFoundErrorResponse(error_response)); + return Ok(UpdateResultResponse::NotFoundErrorResponse( + resource_not_found_response("Result", id), + )); } // Return the updated result by fetching it again diff --git a/src/server/api/ro_crate.rs b/src/server/api/ro_crate.rs index 2b24fdb5..c4fc1833 100644 --- a/src/server/api/ro_crate.rs +++ b/src/server/api/ro_crate.rs @@ -15,7 +15,10 @@ use crate::server::api_responses::{ use crate::models; use crate::ro_crate_json_ld::typed_entity; -use super::{ApiContext, MAX_RECORD_TRANSFER_COUNT, SqlQueryBuilder, database_error_with_msg}; +use super::{ + ApiContext, MAX_RECORD_TRANSFER_COUNT, SqlQueryBuilder, database_error_with_msg, + message_error_response, resource_not_found_response, +}; const RO_CRATE_ENTITY_COLUMNS: &[&str] = &[ "id", @@ -420,12 +423,9 @@ where body.file_id, &body.entity_id, ); - let error_response = models::ErrorResponse::new(serde_json::json!({ - "message": message - })); return Ok( CreateRoCrateEntityResponse::UnprocessableContentErrorResponse( - error_response, + message_error_response(message), ), ); } @@ -468,11 +468,8 @@ where { Ok(Some(record)) => record, Ok(None) => { - let error_response = models::ErrorResponse::new(serde_json::json!({ - "message": format!("RO-Crate entity not found with ID: {}", id) - })); return Ok(GetRoCrateEntityResponse::NotFoundErrorResponse( - error_response, + resource_not_found_response("RO-Crate entity", id), )); } Err(e) => { @@ -521,15 +518,6 @@ where let limit = std::cmp::min(limit, MAX_RECORD_TRANSFER_COUNT); - let validated_sort_by = match sort_by.as_deref() { - Some(col) if RO_CRATE_ENTITY_COLUMNS.contains(&col) => Some(col.to_string()), - Some(col) => { - debug!("Invalid sort column requested: {}", col); - None - } - None => None, - }; - 
let mut where_clauses = vec!["workflow_id = ?".to_string()]; if file_id.is_some() { where_clauses.push("file_id = ?".to_string()); @@ -546,7 +534,7 @@ where .with_pagination_and_sorting( offset, limit, - validated_sort_by, + sort_by, reverse_sort, "id", RO_CRATE_ENTITY_COLUMNS, @@ -583,8 +571,6 @@ where }) .collect(); - let count = items.len() as i64; - // Get total count with the same filters but without pagination. let count_query = format!( "SELECT COUNT(*) as total FROM ro_crate_entity WHERE {}", @@ -611,17 +597,13 @@ where } }; - let has_more = offset + count < total_count; - Ok(ListRoCrateEntitiesResponse::SuccessfulResponse( - models::ListRoCrateEntitiesResponse { + crate::paginated_list_response!( + models::ListRoCrateEntitiesResponse, items, offset, - max_limit: MAX_RECORD_TRANSFER_COUNT, - count, - total_count, - has_more, - }, + total_count + ), )) } @@ -665,12 +647,9 @@ where body.file_id, &body.entity_id, ); - let error_response = models::ErrorResponse::new(serde_json::json!({ - "message": message - })); return Ok( UpdateRoCrateEntityResponse::UnprocessableContentErrorResponse( - error_response, + message_error_response(message), ), ); } @@ -682,11 +661,8 @@ where }; if result.rows_affected() == 0 { - let error_response = models::ErrorResponse::new(serde_json::json!({ - "message": format!("RO-Crate entity not found with ID: {}", id) - })); return Ok(UpdateRoCrateEntityResponse::NotFoundErrorResponse( - error_response, + resource_not_found_response("RO-Crate entity", id), )); } @@ -721,11 +697,8 @@ where }; if result.rows_affected() == 0 { - let error_response = models::ErrorResponse::new(serde_json::json!({ - "message": format!("RO-Crate entity not found with ID: {}", id) - })); return Ok(DeleteRoCrateEntityResponse::NotFoundErrorResponse( - error_response, + resource_not_found_response("RO-Crate entity", id), )); } diff --git a/src/server/api/schedulers.rs b/src/server/api/schedulers.rs index dc1a88fb..7c7e66cc 100644 --- 
a/src/server/api/schedulers.rs +++ b/src/server/api/schedulers.rs @@ -19,7 +19,7 @@ use crate::server::api_responses::{ use crate::models; -use super::{ApiContext, MAX_RECORD_TRANSFER_COUNT, SqlQueryBuilder, database_error_with_msg}; +use super::{ApiContext, SqlQueryBuilder, database_error_with_msg, resource_not_found_response}; /// Trait defining scheduler-related API operations #[async_trait] @@ -508,11 +508,8 @@ where { Ok(Some(record)) => record, Ok(None) => { - let error_response = models::ErrorResponse::new(serde_json::json!({ - "message": format!("Local scheduler not found with ID: {}", id) - })); return Ok(GetLocalSchedulerResponse::NotFoundErrorResponse( - error_response, + resource_not_found_response("Local scheduler", id), )); } Err(e) => { @@ -558,11 +555,8 @@ where { Ok(Some(record)) => record, Ok(None) => { - let error_response = models::ErrorResponse::new(serde_json::json!({ - "message": format!("Scheduled compute node not found with ID: {}", id) - })); return Ok(GetScheduledComputeNodeResponse::NotFoundErrorResponse( - error_response, + resource_not_found_response("Scheduled compute node", id), )); } Err(e) => { @@ -610,11 +604,8 @@ where { Ok(Some(record)) => record, Ok(None) => { - let error_response = models::ErrorResponse::new(serde_json::json!({ - "message": format!("Slurm scheduler not found with ID: {}", id) - })); return Ok(GetSlurmSchedulerResponse::NotFoundErrorResponse( - error_response, + resource_not_found_response("Slurm scheduler", id), )); } Err(e) => { @@ -673,9 +664,7 @@ where // Build WHERE clause let where_clause = "workflow_id = ?".to_string(); - - // Validate sort_by against whitelist - let validated_sort_by = if let Some(ref col) = sort_by { + let sort_by = if let Some(ref col) = sort_by { if LOCAL_SCHEDULER_COLUMNS.contains(&col.as_str()) { Some(col.clone()) } else { @@ -692,7 +681,7 @@ where .with_pagination_and_sorting( offset, limit, - validated_sort_by, + sort_by, reverse_sort, "id", LOCAL_SCHEDULER_COLUMNS, @@ -742,28 
+731,22 @@ where } }; - let current_count = items.len() as i64; - let offset_val = offset; - let has_more = offset_val + current_count < total_count; + let response = crate::paginated_list_response!( + models::ListLocalSchedulersResponse, + items, + offset, + total_count + ); debug!( "list_local_schedulers({}, {}/{}) - X-Span-ID: {:?}", workflow_id, - current_count, + response.count, total_count, context.get().0.clone() ); - Ok(ListLocalSchedulersResponse::HTTP( - models::ListLocalSchedulersResponse { - items, - offset: offset_val, - max_limit: MAX_RECORD_TRANSFER_COUNT, - count: current_count, - total_count, - has_more, - }, - )) + Ok(ListLocalSchedulersResponse::HTTP(response)) } /// Retrieve scheduled compute node records for one workflow. @@ -808,9 +791,7 @@ where } let where_clause = where_conditions.join(" AND "); - - // Validate sort_by against whitelist - let validated_sort_by = if let Some(ref col) = sort_by { + let sort_by = if let Some(ref col) = sort_by { if SCHEDULED_COMPUTE_NODE_COLUMNS.contains(&col.as_str()) { Some(col.clone()) } else { @@ -827,7 +808,7 @@ where .with_pagination_and_sorting( offset, limit, - validated_sort_by, + sort_by, reverse_sort, "id", SCHEDULED_COMPUTE_NODE_COLUMNS, @@ -895,27 +876,23 @@ where } }; - let current_count = items.len() as i64; - let offset_val = offset; - let has_more = offset_val + current_count < total_count; + let response = crate::paginated_list_response!( + models::ListScheduledComputeNodesResponse, + items, + offset, + total_count + ); debug!( "list_scheduled_compute_nodes({}, {}/{}) - X-Span-ID: {:?}", workflow_id, - current_count, + response.count, total_count, context.get().0.clone() ); Ok(ListScheduledComputeNodesResponse::SuccessfulResponse( - models::ListScheduledComputeNodesResponse { - items, - offset: offset_val, - max_limit: MAX_RECORD_TRANSFER_COUNT, - count: current_count, - total_count, - has_more, - }, + response, )) } @@ -945,9 +922,7 @@ where // Build WHERE clause let where_clause = 
"workflow_id = ?".to_string(); - - // Validate sort_by against whitelist - let validated_sort_by = if let Some(ref col) = sort_by { + let sort_by = if let Some(ref col) = sort_by { if SLURM_SCHEDULER_COLUMNS.contains(&col.as_str()) { Some(col.clone()) } else { @@ -964,7 +939,7 @@ where .with_pagination_and_sorting( offset, limit, - validated_sort_by, + sort_by, reverse_sort, "id", SLURM_SCHEDULER_COLUMNS, @@ -1022,28 +997,22 @@ where } }; - let current_count = items.len() as i64; - let offset_val = offset; - let has_more = offset_val + current_count < total_count; + let response = crate::paginated_list_response!( + models::ListSlurmSchedulersResponse, + items, + offset, + total_count + ); debug!( "list_slurm_schedulers({}, {}/{}) - X-Span-ID: {:?}", workflow_id, - current_count, + response.count, total_count, context.get().0.clone() ); - Ok(ListSlurmSchedulersResponse::SuccessfulResponse( - models::ListSlurmSchedulersResponse { - items, - offset: offset_val, - max_limit: MAX_RECORD_TRANSFER_COUNT, - count: current_count, - total_count, - has_more, - }, - )) + Ok(ListSlurmSchedulersResponse::SuccessfulResponse(response)) } /// Update a local scheduler. 
@@ -1097,11 +1066,8 @@ where }; if result.rows_affected() == 0 { - let error_response = models::ErrorResponse::new(serde_json::json!({ - "message": format!("Local scheduler not found with ID: {}", id) - })); return Ok(UpdateLocalSchedulerResponse::NotFoundErrorResponse( - error_response, + resource_not_found_response("Local scheduler", id), )); } @@ -1180,11 +1146,8 @@ where }; if result.rows_affected() == 0 { - let error_response = models::ErrorResponse::new(serde_json::json!({ - "message": format!("Scheduled compute node not found with ID: {}", id) - })); return Ok(UpdateScheduledComputeNodeResponse::NotFoundErrorResponse( - error_response, + resource_not_found_response("Scheduled compute node", id), )); } @@ -1278,11 +1241,8 @@ where }; if result.rows_affected() == 0 { - let error_response = models::ErrorResponse::new(serde_json::json!({ - "message": format!("Slurm scheduler not found with ID: {}", id) - })); return Ok(UpdateSlurmSchedulerResponse::NotFoundErrorResponse( - error_response, + resource_not_found_response("Slurm scheduler", id), )); } diff --git a/src/server/api/slurm_stats.rs b/src/server/api/slurm_stats.rs index cd9b89ec..47efd67c 100644 --- a/src/server/api/slurm_stats.rs +++ b/src/server/api/slurm_stats.rs @@ -188,17 +188,15 @@ impl> SlurmStatsApi for SlurmStatsApiImpl node_list: r.node_list, }) .collect(); - let count = items.len() as i64; - let has_more = offset + count < total_count; - let mut response = models::ListSlurmStatsResponse::new( - offset, - limit, - count, - total_count, - has_more, - ); - response.items = items; - Ok(ListSlurmStatsResponse::SuccessfulResponse(response)) + Ok(ListSlurmStatsResponse::SuccessfulResponse( + crate::paginated_list_response!( + models::ListSlurmStatsResponse, + items, + offset, + total_count, + max_limit = limit + ), + )) } Err(e) => Err(database_error_with_msg(e, "Failed to list slurm_stats")), } diff --git a/src/server/api/user_data.rs b/src/server/api/user_data.rs index 03ac3573..ba9d7f99 100644 --- 
a/src/server/api/user_data.rs +++ b/src/server/api/user_data.rs @@ -14,7 +14,9 @@ use crate::server::api_responses::{ use crate::models; -use super::{ApiContext, MAX_RECORD_TRANSFER_COUNT, database_error_with_msg, escape_like_pattern}; +use super::{ + ApiContext, database_error_with_msg, escape_like_pattern, resource_not_found_response, +}; /// Trait defining user data-related API operations #[async_trait] @@ -277,10 +279,9 @@ where { Ok(Some(row)) => row, Ok(None) => { - let error_response = models::ErrorResponse::new(serde_json::json!({ - "message": format!("User data not found with ID: {}", id) - })); - return Ok(GetUserDataResponse::NotFoundErrorResponse(error_response)); + return Ok(GetUserDataResponse::NotFoundErrorResponse( + resource_not_found_response("User data", id), + )); } Err(e) => { return Err(database_error_with_msg(e, "Failed to fetch user data")); @@ -405,9 +406,7 @@ where } let where_clause = where_conditions.join(" AND "); - - // Validate sort_by against whitelist - let validated_sort_by = if let Some(ref col) = sort_by { + let sort_by = if let Some(ref col) = sort_by { if USER_DATA_COLUMNS.contains(&col.as_str()) { Some(format!("ud.{}", col)) } else { @@ -424,7 +423,7 @@ where .with_pagination_and_sorting( offset, limit, - validated_sort_by, + sort_by, reverse_sort, "ud.id", USER_DATA_COLUMNS, @@ -506,28 +505,22 @@ where } }; - let current_count = items.len() as i64; - let offset_val = offset; - let has_more = offset_val + current_count < total_count; + let response = crate::paginated_list_response!( + models::ListUserDataResponse, + items, + offset, + total_count + ); debug!( "list_user_data({}, {}/{}) - X-Span-ID: {:?}", workflow_id, - current_count, + response.count, total_count, context.get().0.clone() ); - Ok(ListUserDataResponse::SuccessfulResponse( - models::ListUserDataResponse { - items, - offset: offset_val, - max_limit: MAX_RECORD_TRANSFER_COUNT, - count: current_count, - total_count, - has_more, - }, - )) + 
Ok(ListUserDataResponse::SuccessfulResponse(response)) } /// Update user data. @@ -595,11 +588,8 @@ where }; if result.rows_affected() == 0 { - let error_response = models::ErrorResponse::new(serde_json::json!({ - "message": format!("User data not found with ID: {}", id) - })); return Ok(UpdateUserDataResponse::NotFoundErrorResponse( - error_response, + resource_not_found_response("User data", id), )); } diff --git a/src/server/api/workflow_actions.rs b/src/server/api/workflow_actions.rs index 28857da4..9915096a 100644 --- a/src/server/api/workflow_actions.rs +++ b/src/server/api/workflow_actions.rs @@ -13,7 +13,9 @@ use crate::server::api_responses::{ use crate::models; use crate::models::JobStatus; -use super::{ApiContext, database_error_with_msg}; +use super::{ + ApiContext, database_error_with_msg, message_error_response, resource_not_found_response, +}; /// Validate action_config based on action_type fn validate_action_config( @@ -191,11 +193,10 @@ where validate_action_config(&body.action_type, &body.action_config) { error!("Invalid action_config: {}", validation_error); - let error_response = models::ErrorResponse::new(serde_json::json!({ - "message": format!("Invalid action_config: {}", validation_error) - })); return Ok( - CreateWorkflowActionResponse::UnprocessableContentErrorResponse(error_response), + CreateWorkflowActionResponse::UnprocessableContentErrorResponse( + message_error_response(format!("Invalid action_config: {}", validation_error)), + ), ); } @@ -250,11 +251,8 @@ where } Err(e) => { error!("Failed to create workflow action: {}", e); - let error_response = models::ErrorResponse::new(serde_json::json!({ - "message": format!("Failed to create workflow action: {}", e) - })); Ok(CreateWorkflowActionResponse::DefaultErrorResponse( - error_response, + message_error_response(format!("Failed to create workflow action: {}", e)), )) } } @@ -319,22 +317,19 @@ where Ok(actions) => Ok(GetWorkflowActionsResponse::SuccessfulResponse(actions)), Err(e) => { 
error!("Failed to parse workflow actions: {}", e); - let error_response = models::ErrorResponse::new(serde_json::json!({ - "message": format!("Failed to parse workflow actions: {}", e) - })); Ok(GetWorkflowActionsResponse::DefaultErrorResponse( - error_response, + message_error_response(format!( + "Failed to parse workflow actions: {}", + e + )), )) } } } Err(e) => { error!("Failed to get workflow actions: {}", e); - let error_response = models::ErrorResponse::new(serde_json::json!({ - "message": format!("Failed to get workflow actions: {}", e) - })); Ok(GetWorkflowActionsResponse::DefaultErrorResponse( - error_response, + message_error_response(format!("Failed to get workflow actions: {}", e)), )) } } @@ -434,22 +429,19 @@ where Ok(actions) => Ok(GetPendingActionsResponse::SuccessfulResponse(actions)), Err(e) => { error!("Failed to parse pending actions: {}", e); - let error_response = models::ErrorResponse::new(serde_json::json!({ - "message": format!("Failed to parse pending actions: {}", e) - })); Ok(GetPendingActionsResponse::DefaultErrorResponse( - error_response, + message_error_response(format!( + "Failed to parse pending actions: {}", + e + )), )) } } } Err(e) => { error!("Failed to get pending actions: {}", e); - let error_response = models::ErrorResponse::new(serde_json::json!({ - "message": format!("Failed to get pending actions: {}", e) - })); Ok(GetPendingActionsResponse::DefaultErrorResponse( - error_response, + message_error_response(format!("Failed to get pending actions: {}", e)), )) } } @@ -486,13 +478,12 @@ where let persistent_col: i32 = record.get("persistent"); if workflow_id_col != workflow_id { - let error_response = models::ErrorResponse::new(serde_json::json!({ - "message": format!( + return Ok(ClaimActionResponse::NotFoundErrorResponse( + message_error_response(format!( "Action {} does not belong to workflow {}", action_id, workflow_id - ) - })); - return Ok(ClaimActionResponse::NotFoundErrorResponse(error_response)); + )), + )); } // 
For non-persistent actions, check if already executed let is_persistent = persistent_col != 0; @@ -507,10 +498,9 @@ where is_persistent } Ok(None) => { - let error_response = models::ErrorResponse::new(serde_json::json!({ - "message": format!("Action not found with ID: {}", action_id) - })); - return Ok(ClaimActionResponse::NotFoundErrorResponse(error_response)); + return Ok(ClaimActionResponse::NotFoundErrorResponse( + resource_not_found_response("Action", action_id), + )); } Err(e) => { return Err(database_error_with_msg( @@ -573,10 +563,9 @@ where } Err(e) => { error!("Failed to claim action: {}", e); - let error_response = models::ErrorResponse::new(serde_json::json!({ - "message": format!("Failed to claim action: {}", e) - })); - Ok(ClaimActionResponse::DefaultErrorResponse(error_response)) + Ok(ClaimActionResponse::DefaultErrorResponse( + message_error_response(format!("Failed to claim action: {}", e)), + )) } } } @@ -747,7 +736,7 @@ impl WorkflowActionsApiImpl { { Ok(Some(status)) => status, Ok(None) => { - debug!("Job {} not found in workflow {}", job_id, workflow_id); + debug!("job_id={} not found in workflow_id={}", job_id, workflow_id); continue; } Err(e) => { diff --git a/src/server/api/workflows.rs b/src/server/api/workflows.rs index 772eabe1..4b6a48b2 100644 --- a/src/server/api/workflows.rs +++ b/src/server/api/workflows.rs @@ -20,8 +20,8 @@ use crate::models; use super::{ ApiContext, MAX_RECORD_TRANSFER_COUNT, SqlQueryBuilder, database_error_with_msg, - deserialize_env_map, escape_like_pattern, normalize_env_map, serialize_env_map, - validate_env_map, + deserialize_env_map, escape_like_pattern, message_error_response, normalize_env_map, + resource_not_found_response, serialize_env_map, validate_env_map, }; /// Trait defining workflow-related API operations @@ -237,18 +237,6 @@ impl WorkflowsApiImpl { context.get().0.clone() ); - // Validate sort_by against whitelist - let validated_sort_by = if let Some(ref col) = sort_by { - if 
WORKFLOW_COLUMNS.contains(&col.as_str()) { - Some(col.clone()) - } else { - debug!("Invalid sort column requested: {}", col); - None // Fall back to default - } - } else { - None - }; - let base_query = " SELECT id @@ -305,14 +293,12 @@ impl WorkflowsApiImpl { if ids.is_empty() { // No accessible workflows - return empty result return Ok(ListWorkflowsResponse::SuccessfulResponse( - models::ListWorkflowsResponse { - items: Vec::new(), + crate::paginated_list_response!( + models::ListWorkflowsResponse, + Vec::::new(), offset, - max_limit: MAX_RECORD_TRANSFER_COUNT, - count: 0, - total_count: 0, - has_more: false, - }, + 0_i64 + ), )); } let placeholders: Vec = ids.iter().map(|_| "?".to_string()).collect(); @@ -330,7 +316,7 @@ impl WorkflowsApiImpl { .with_pagination_and_sorting( offset, limit, - validated_sort_by, + sort_by, reverse_sort, "id", WORKFLOW_COLUMNS, @@ -342,7 +328,7 @@ impl WorkflowsApiImpl { .with_pagination_and_sorting( offset, limit, - validated_sort_by, + sort_by, reverse_sort, "id", WORKFLOW_COLUMNS, @@ -465,27 +451,21 @@ impl WorkflowsApiImpl { } }; - let current_count = items.len() as i64; - let offset_val = offset; - let has_more = offset_val + current_count < total_count; + let response = crate::paginated_list_response!( + models::ListWorkflowsResponse, + items, + offset, + total_count + ); debug!( "list_workflows_filtered({}/{}) - X-Span-ID: {:?}", - current_count, + response.count, total_count, context.get().0.clone() ); - Ok(ListWorkflowsResponse::SuccessfulResponse( - models::ListWorkflowsResponse { - items, - offset: offset_val, - max_limit: MAX_RECORD_TRANSFER_COUNT, - count: current_count, - total_count, - has_more, - }, - )) + Ok(ListWorkflowsResponse::SuccessfulResponse(response)) } } @@ -518,11 +498,8 @@ where body.timestamp = Some(Utc::now().format("%Y-%m-%dT%H:%M:%S%.3fZ").to_string()); if let Err(err) = validate_env_map(body.env.as_ref(), "workflow env") { - let error_response = models::ErrorResponse::new(serde_json::json!({ - 
"message": err.0 - })); return Ok(CreateWorkflowResponse::UnprocessableContentErrorResponse( - error_response, + message_error_response(err.0), )); } body.env = normalize_env_map(body.env.take()); @@ -667,10 +644,9 @@ where if result.rows_affected() == 0 { let _ = tx.rollback().await; - let error_response = models::ErrorResponse::new(serde_json::json!({ - "message": format!("Workflow not found with ID: {}", id) - })); - return Ok(CancelWorkflowResponse::DefaultErrorResponse(error_response)); + return Ok(CancelWorkflowResponse::NotFoundErrorResponse( + resource_not_found_response("Workflow", id), + )); } info!("Successfully canceled workflow with ID: {}", id); @@ -685,29 +661,25 @@ where }); let event_data_str = event_data.to_string(); - match sqlx::query( - r#" - INSERT INTO event (workflow_id, timestamp, data) - VALUES ($1, $2, $3) - "#, - ) - .bind(id) - .bind(timestamp) - .bind(&event_data_str) - .execute(&mut *tx) - .await - { - Ok(_) => { - debug!("Created workflow_canceled event for workflow {}", id); - } - Err(e) => { - let _ = tx.rollback().await; - return Err(database_error_with_msg( - e, - "Failed to create workflow cancellation event", - )); - } - } + crate::tx_try!( + tx, + sqlx::query( + r#" + INSERT INTO event (workflow_id, timestamp, data) + VALUES ($1, $2, $3) + "#, + ) + .bind(id) + .bind(timestamp) + .bind(&event_data_str) + .execute(&mut *tx) + .await + .map_err(|e| database_error_with_msg( + e, + "Failed to create workflow cancellation event" + )) + ); + debug!("Created workflow_canceled event for workflow_id={}", id); // Cancel all running and pending jobs in the workflow let submitted_status = models::JobStatus::Running.to_int(); @@ -791,11 +763,8 @@ where .map_err(|e| database_error_with_msg(e, "Failed to update workflow archive flag"))?; if result.rows_affected() == 0 { - let error_response = models::ErrorResponse::new(serde_json::json!({ - "message": format!("Workflow not found with ID: {}", id) - })); return 
Ok(ArchiveWorkflowResponse::NotFoundErrorResponse( - error_response, + resource_not_found_response("Workflow", id), )); } @@ -901,12 +870,9 @@ where is_archived: Some(row.get::("is_archived") != 0), }, )), - Ok(None) => { - let error_response = models::ErrorResponse::new(serde_json::json!({ - "message": format!("Workflow not found with ID: {}", id) - })); - Ok(GetWorkflowResponse::NotFoundErrorResponse(error_response)) - } + Ok(None) => Ok(GetWorkflowResponse::NotFoundErrorResponse( + resource_not_found_response("Workflow", id), + )), Err(e) => Err(database_error_with_msg(e, "Failed to get workflow")), } } @@ -932,11 +898,8 @@ where { Ok(Some(v)) => v != 0, Ok(None) => { - let error_response = models::ErrorResponse::new(serde_json::json!({ - "message": format!("Workflow not found with ID: {}", id) - })); return Ok(IsWorkflowCompleteResponse::NotFoundErrorResponse( - error_response, + resource_not_found_response("Workflow", id), )); } Err(e) => { @@ -1125,11 +1088,10 @@ where let enable_ro_crate_int = body.enable_ro_crate.map(|val| if val { 1 } else { 0 }); let body_env = normalize_env_map(body.env.clone()); if body.env.is_some() && body_env != current_workflow.env { - let error_response = models::ErrorResponse::new(serde_json::json!({ - "message": "Cannot modify env - this field is immutable after workflow creation" - })); return Ok(UpdateWorkflowResponse::UnprocessableContentErrorResponse( - error_response, + message_error_response( + "Cannot modify env - this field is immutable after workflow creation", + ), )); } @@ -1178,11 +1140,8 @@ where }; if result.rows_affected() == 0 { - let error_response = models::ErrorResponse::new(serde_json::json!({ - "message": format!("Workflow not found with ID: {}", id) - })); return Ok(UpdateWorkflowResponse::NotFoundErrorResponse( - error_response, + resource_not_found_response("Workflow", id), )); } @@ -1467,11 +1426,12 @@ where // Check if workflow is archived if current_workflow.is_archived.unwrap_or(false) { - let 
error_response = models::ErrorResponse::new(serde_json::json!({ - "message": "Cannot reset archived workflow status. Unarchive the workflow first." - })); return Ok( - ResetWorkflowStatusResponse::UnprocessableContentErrorResponse(error_response), + ResetWorkflowStatusResponse::UnprocessableContentErrorResponse( + message_error_response( + "Cannot reset archived workflow status. Unarchive the workflow first.", + ), + ), ); } @@ -1504,11 +1464,12 @@ where id ); } else { - let error_response = models::ErrorResponse::new(serde_json::json!({ - "message": "Cannot reset workflow status: jobs are currently running or pending submission" - })); return Ok( - ResetWorkflowStatusResponse::UnprocessableContentErrorResponse(error_response), + ResetWorkflowStatusResponse::UnprocessableContentErrorResponse( + message_error_response( + "Cannot reset workflow status: jobs are currently running or pending submission", + ), + ), ); } } @@ -1534,11 +1495,12 @@ where id ); } else { - let error_response = models::ErrorResponse::new(serde_json::json!({ - "message": "Cannot reset workflow status: scheduled compute nodes are currently pending or active" - })); return Ok( - ResetWorkflowStatusResponse::UnprocessableContentErrorResponse(error_response), + ResetWorkflowStatusResponse::UnprocessableContentErrorResponse( + message_error_response( + "Cannot reset workflow status: scheduled compute nodes are currently pending or active", + ), + ), ); } } @@ -1633,7 +1595,7 @@ where .unwrap_or(MAX_RECORD_TRANSFER_COUNT) .min(MAX_RECORD_TRANSFER_COUNT); - let validated_sort_by = match sort_by.as_deref() { + let sort_by = match sort_by.as_deref() { Some("job_id") => Some("jb.job_id".to_string()), Some("job_name") => Some("j1.name".to_string()), Some("depends_on_job_id") => Some("jb.depends_on_job_id".to_string()), @@ -1664,7 +1626,7 @@ where .with_pagination_and_sorting( offset_val, limit_val, - validated_sort_by, + sort_by, reverse_sort, "jb.job_id", JOB_DEPENDENCY_COLUMNS, @@ -1713,27 +1675,22 @@ 
where } }; - let current_count = dependencies.len() as i64; - let has_more = offset_val + current_count < total_count; + let response = crate::paginated_list_response!( + models::ListJobDependenciesResponse, + dependencies, + offset_val, + total_count + ); debug!( "list_job_dependencies({}) - returning {}/{} dependencies - X-Span-ID: {:?}", workflow_id, - current_count, + response.count, total_count, context.get().0.clone() ); - Ok(ListJobDependenciesResponse::SuccessfulResponse( - models::ListJobDependenciesResponse { - items: dependencies, - offset: offset_val, - max_limit: MAX_RECORD_TRANSFER_COUNT, - count: current_count, - total_count, - has_more, - }, - )) + Ok(ListJobDependenciesResponse::SuccessfulResponse(response)) } /// Retrieve job-file relationships for a workflow. @@ -1761,7 +1718,7 @@ where .unwrap_or(MAX_RECORD_TRANSFER_COUNT) .min(MAX_RECORD_TRANSFER_COUNT); - let validated_sort_by = match sort_by.as_deref() { + let sort_by = match sort_by.as_deref() { Some("file_id") => Some("f.id".to_string()), Some("file_name") => Some("f.name".to_string()), Some("file_path") => Some("f.path".to_string()), @@ -1802,7 +1759,7 @@ where .with_pagination_and_sorting( offset_val, limit_val, - validated_sort_by, + sort_by, reverse_sort, "f.id", JOB_FILE_RELATIONSHIP_COLUMNS, @@ -1876,26 +1833,23 @@ where } }; - let current_count = relationships.len() as i64; - let has_more = offset_val + current_count < total_count; + let response = crate::paginated_list_response!( + models::ListJobFileRelationshipsResponse, + relationships, + offset_val, + total_count + ); debug!( "list_job_file_relationships({}) - returning {}/{} relationships - X-Span-ID: {:?}", workflow_id, - current_count, + response.count, total_count, context.get().0.clone() ); Ok(ListJobFileRelationshipsResponse::SuccessfulResponse( - models::ListJobFileRelationshipsResponse { - items: relationships, - offset: offset_val, - max_limit: MAX_RECORD_TRANSFER_COUNT, - count: current_count, - total_count, - 
has_more, - }, + response, )) } @@ -1924,7 +1878,7 @@ where .unwrap_or(MAX_RECORD_TRANSFER_COUNT) .min(MAX_RECORD_TRANSFER_COUNT); - let validated_sort_by = match sort_by.as_deref() { + let sort_by = match sort_by.as_deref() { Some("user_data_id") => Some("ud.id".to_string()), Some("user_data_name") => Some("ud.name".to_string()), Some("producer_job_id") => Some("joud.job_id".to_string()), @@ -1964,7 +1918,7 @@ where .with_pagination_and_sorting( offset_val, limit_val, - validated_sort_by, + sort_by, reverse_sort, "ud.id", JOB_USER_DATA_RELATIONSHIP_COLUMNS, @@ -2037,26 +1991,23 @@ where } }; - let current_count = relationships.len() as i64; - let has_more = offset_val + current_count < total_count; + let response = crate::paginated_list_response!( + models::ListJobUserDataRelationshipsResponse, + relationships, + offset_val, + total_count + ); debug!( "list_job_user_data_relationships({}) - returning {}/{} relationships - X-Span-ID: {:?}", workflow_id, - current_count, + response.count, total_count, context.get().0.clone() ); Ok(ListJobUserDataRelationshipsResponse::SuccessfulResponse( - models::ListJobUserDataRelationshipsResponse { - items: relationships, - offset: offset_val, - max_limit: MAX_RECORD_TRANSFER_COUNT, - count: current_count, - total_count, - has_more, - }, + response, )) } } diff --git a/src/server/http_server.rs b/src/server/http_server.rs index 16e92032..c3ad920a 100644 --- a/src/server/http_server.rs +++ b/src/server/http_server.rs @@ -45,21 +45,43 @@ pub(super) enum TaskCreation { /// An identical task is already active; returned idempotently. No work spawned. Existing(models::TaskModel), } -macro_rules! forbidden_error { - ($reason:expr) => { +/// Emit a structured log line that always includes the request's `X-Span-ID`. The format +/// literal is what the call site provides; the trailing ` - X-Span-ID: {:?}` is appended by +/// the macro so individual handlers don't reach into `Has::::get(context)`. 
+/// +/// Usage: `log_call!(debug, context, "name(args...)", arg1, arg2);` +macro_rules! log_call { + ($level:ident, $context:expr, $fmt:literal $(, $arg:expr)* $(,)?) => { + log::$level!( + concat!($fmt, " - X-Span-ID: {:?}"), + $($arg,)* + Has::::get($context).0.clone() + ); + }; +} + +/// Build a `models::ErrorResponse` whose body is `{"error": , "message": }`. +/// This is the standard shape returned by the live server for typed error responses; the +/// `forbidden_error!`/`not_found_error!` aliases below cover the most common kinds, but +/// transport handlers reaching for a custom error tag should use this directly. +macro_rules! error_payload { + ($kind:expr, $reason:expr) => { models::ErrorResponse::new(serde_json::json!({ - "error": "Forbidden", + "error": $kind, "message": $reason })) }; } +macro_rules! forbidden_error { + ($reason:expr) => { + error_payload!("Forbidden", $reason) + }; +} + macro_rules! not_found_error { ($reason:expr) => { - models::ErrorResponse::new(serde_json::json!({ - "error": "NotFound", - "message": $reason - })) + error_payload!("NotFound", $reason) }; } @@ -177,6 +199,25 @@ macro_rules! authorize_admin { }; } +/// Combined "auth + pagination unpack" scaffold for paginated workflow-scoped list handlers. +/// Equivalent to writing `authorize_workflow!(...)` followed by `process_pagination_params(...)?`, +/// but keeps the entire list-handler preamble in one expression. Returns the `(offset, limit)` +/// pair on success; on auth failure or oversized limit it short-circuits the calling function. +macro_rules! authorize_workflow_and_paginate { + ($self:ident, $workflow_id:expr, $context:expr, $response_enum:ident, $offset:expr, $limit:expr) => {{ + authorize_workflow!($self, $workflow_id, $context, $response_enum); + process_pagination_params($offset, $limit)? + }}; +} + +/// Same shape as [`authorize_workflow_and_paginate!`] but for admin-only paginated list handlers. +macro_rules! 
authorize_admin_and_paginate { + ($self:ident, $context:expr, $response_enum:ident, $offset:expr, $limit:expr) => {{ + authorize_admin!($self, $context, $response_enum); + process_pagination_params($offset, $limit)? + }}; +} + macro_rules! authorize_workflow_group { ($self:ident, $workflow_id:expr, $group_id:expr, $context:expr, $response_enum:ident) => { match $self @@ -222,6 +263,18 @@ mod unblock_processing; mod user_data_transport; mod workflows_transport; +/// Extract the authenticated subject from the request context, falling back to "unknown" when +/// the request is anonymous. Used for attribution fields on audit/user-action events. +fn username_from_context(context: &C) -> String +where + C: Has>, +{ + Has::>::get(context) + .as_ref() + .map(|a| a.subject.clone()) + .unwrap_or_else(|| "unknown".to_string()) +} + /// Process optional offset and limit parameters and return concrete values. /// Returns (offset, limit) where: /// - offset defaults to 0 if not provided @@ -2502,7 +2555,43 @@ where // Helper methods for Server (not part of the Api trait) impl Server where - C: Has + Send + Sync, + C: Has + Has> + Send + Sync, { - // No additional helper methods needed + /// Record a best-effort "user_action" audit event. The event is awaited inline, but any + /// failure is logged and swallowed so audit failures cannot break the user-facing request. + /// The `extra` value must be a JSON object; this helper merges in the standard + /// `category`/`action`/`user` fields. 
+ async fn record_user_action_event( + &self, + workflow_id: i64, + action: &str, + mut extra: serde_json::Value, + context: &C, + ) { + use crate::server::api::EventsApi; + + let username = username_from_context(context); + if let Some(map) = extra.as_object_mut() { + map.insert( + "category".to_string(), + serde_json::Value::String("user_action".to_string()), + ); + map.insert( + "action".to_string(), + serde_json::Value::String(action.to_string()), + ); + map.insert("user".to_string(), serde_json::Value::String(username)); + } else { + error!( + "record_user_action_event: extra payload for action '{}' must be a JSON object", + action + ); + return; + } + + let event = models::EventModel::new(workflow_id, extra); + if let Err(e) = self.events_api.create_event(event, context).await { + error!("Failed to create event for {}: {:?}", action, e); + } + } } diff --git a/src/server/http_server/access_control_transport.rs b/src/server/http_server/access_control_transport.rs index 3a6724eb..988221b9 100644 --- a/src/server/http_server/access_control_transport.rs +++ b/src/server/http_server/access_control_transport.rs @@ -30,8 +30,13 @@ where limit: Option, context: &C, ) -> Result { - authorize_admin!(self, context, ListAccessGroupsApiResponse); - let (offset, limit) = process_pagination_params(offset, limit)?; + let (offset, limit) = authorize_admin_and_paginate!( + self, + context, + ListAccessGroupsApiResponse, + offset, + limit + ); self.access_groups_api .list_access_groups(offset, limit, context) .await @@ -92,8 +97,8 @@ where limit: Option, context: &C, ) -> Result { - authorize_admin!(self, context, ListGroupMembersResponse); - let (offset, limit) = process_pagination_params(offset, limit)?; + let (offset, limit) = + authorize_admin_and_paginate!(self, context, ListGroupMembersResponse, offset, limit); self.access_groups_api .list_group_members(group_id, offset, limit, context) .await @@ -163,8 +168,14 @@ where limit: Option, context: &C, ) -> Result { - 
authorize_workflow!(self, workflow_id, context, ListWorkflowGroupsResponse); - let (offset, limit) = process_pagination_params(offset, limit)?; + let (offset, limit) = authorize_workflow_and_paginate!( + self, + workflow_id, + context, + ListWorkflowGroupsResponse, + offset, + limit + ); self.access_groups_api .list_workflow_groups(workflow_id, offset, limit, context) .await diff --git a/src/server/http_server/compute_nodes_transport.rs b/src/server/http_server/compute_nodes_transport.rs index 12fc587e..ad33a0d3 100644 --- a/src/server/http_server/compute_nodes_transport.rs +++ b/src/server/http_server/compute_nodes_transport.rs @@ -60,8 +60,14 @@ where scheduled_compute_node_id: Option, context: &C, ) -> Result { - authorize_workflow!(self, workflow_id, context, ListComputeNodesResponse); - let (offset, limit) = process_pagination_params(offset, limit)?; + let (offset, limit) = authorize_workflow_and_paginate!( + self, + workflow_id, + context, + ListComputeNodesResponse, + offset, + limit + ); self.compute_nodes_api .list_compute_nodes( workflow_id, diff --git a/src/server/http_server/files_transport.rs b/src/server/http_server/files_transport.rs index 0494786d..08c01122 100644 --- a/src/server/http_server/files_transport.rs +++ b/src/server/http_server/files_transport.rs @@ -35,8 +35,14 @@ where is_output: Option, context: &C, ) -> Result { - authorize_workflow!(self, workflow_id, context, ListFilesResponse); - let (offset, limit) = process_pagination_params(offset, limit)?; + let (offset, limit) = authorize_workflow_and_paginate!( + self, + workflow_id, + context, + ListFilesResponse, + offset, + limit + ); self.files_api .list_files( workflow_id, diff --git a/src/server/http_server/jobs_transport.rs b/src/server/http_server/jobs_transport.rs index 1585830f..067c822f 100644 --- a/src/server/http_server/jobs_transport.rs +++ b/src/server/http_server/jobs_transport.rs @@ -1,6 +1,7 @@ use super::*; use crate::server::api::{ - EventsApi, JobsApi, ResultsApi, 
WorkflowsApi, begin_immediate, database_lock_aware_error, + JobsApi, ResultsApi, WorkflowsApi, begin_immediate, database_lock_aware_error, + message_error_response, parse_job_status, resource_not_found_response, }; const RESOURCE_CLAIM_ORDER_BY: &str = "\ @@ -151,6 +152,31 @@ enum CompletionMutationError { Transport(ApiError), } +/// Translate a `GetJobResponse` into `Ok(JobModel)` on success, or `Err()` carrying the +/// matching error variant of `$target_enum`. Logs each non-success variant with the job id so +/// individual call sites do not have to repeat identical match arms. Only valid where the target +/// enum has `ForbiddenErrorResponse`/`NotFoundErrorResponse`/`DefaultErrorResponse` variants that +/// take the same `ErrorResponse` payload as `GetJobResponse`. +macro_rules! translate_get_job_response { + ($source:expr, $id:expr, $target:ident) => { + match $source { + GetJobResponse::SuccessfulResponse(job) => Ok(job), + GetJobResponse::ForbiddenErrorResponse(err) => { + error!("Access denied job_id={} error={:?}", $id, err); + Err($target::ForbiddenErrorResponse(err)) + } + GetJobResponse::NotFoundErrorResponse(err) => { + error!("Job not found job_id={} error={:?}", $id, err); + Err($target::NotFoundErrorResponse(err)) + } + GetJobResponse::DefaultErrorResponse(err) => { + error!("Failed to get job job_id={} error={:?}", $id, err); + Err($target::DefaultErrorResponse(err)) + } + } + }; +} + fn completion_error_message(err: &models::ErrorResponse) -> String { err.error .get("message") @@ -161,6 +187,43 @@ fn completion_error_message(err: &models::ErrorResponse) -> String { }) } +/// Verify that a `ResultModel` matches the target identifiers a caller is completing against. +/// Returns the validation error as a 422 `ErrorResponse` so the two completion paths +/// (`apply_job_completion_state` and `apply_job_completion_state_tx`) can wrap it identically. 
+fn validate_result_matches_target( + id: i64, + job_workflow_id: i64, + status: models::JobStatus, + run_id: i64, + result: &models::ResultModel, +) -> Result<(), models::ErrorResponse> { + if result.job_id != id { + return Err(message_error_response(format!( + "ResultModel job_id={} does not match target job_id={}", + result.job_id, id + ))); + } + if result.workflow_id != job_workflow_id { + return Err(message_error_response(format!( + "ResultModel workflow_id={} does not match job's workflow_id={}", + result.workflow_id, job_workflow_id + ))); + } + if result.status != status { + return Err(message_error_response(format!( + "ResultModel status='{}' does not match target status='{}'", + result.status, status + ))); + } + if result.run_id != run_id { + return Err(message_error_response(format!( + "ResultModel run_id={} does not match target run_id={}", + result.run_id, run_id + ))); + } + Ok(()) +} + struct BackfillClaimParams { workflow_id: i64, ready_status: i32, @@ -187,10 +250,10 @@ fn claim_candidate_row( return Ok(false); } - let status = models::JobStatus::from_int(row.get::("status") as i32).map_err(|e| { - error!("Failed to parse job status: {}", e); - ApiError("Invalid job status".to_string()) - })?; + let status = parse_job_status( + row.get::("status") as i32, + row.get::("job_id"), + )?; if status != models::JobStatus::Ready { error!("Expected job status to be Ready, but got: {}", status); @@ -383,14 +446,11 @@ where let first_workflow_id = body.jobs[0].workflow_id; for job in &body.jobs { if job.workflow_id != first_workflow_id { - let error_response = models::ErrorResponse::new(serde_json::json!({ - "message": format!( + return Ok(CreateJobsResponse::UnprocessableContentErrorResponse( + message_error_response(format!( "All jobs in a batch must have the same workflow_id. 
Found workflow_ids: {} and {}", first_workflow_id, job.workflow_id - ) - })); - return Ok(CreateJobsResponse::UnprocessableContentErrorResponse( - error_response, + )), )); } } @@ -418,20 +478,18 @@ where async_: Option, context: &C, ) -> Result { - info!( - "initialize_jobs({}, {:?}, {:?}, async={:?}) - X-Span-ID: {:?}", + log_call!( + info, + context, + "initialize_jobs({}, {:?}, {:?}, async={:?})", id, only_uninitialized, clear_ephemeral_user_data, async_, - Has::::get(context).0.clone() ); authorize_workflow!(self, id, context, InitializeJobsResponse); - let auth: Option = Has::>::get(context).clone(); - let username = auth - .map(|a| a.subject) - .unwrap_or_else(|| "unknown".to_string()); + let username = username_from_context(context); if async_.unwrap_or(false) { let outcome = match self @@ -516,16 +574,22 @@ where active_compute_node_id: Option, context: &C, ) -> Result { - authorize_workflow!(self, workflow_id, context, ListJobsResponse); - let (processed_offset, processed_limit) = process_pagination_params(offset, limit)?; + let (offset, limit) = authorize_workflow_and_paginate!( + self, + workflow_id, + context, + ListJobsResponse, + offset, + limit + ); self.jobs_api .list_jobs( workflow_id, status, needs_file_id, upstream_job_id, - processed_offset, - processed_limit, + offset, + limit, sort_by, reverse_sort, include_relationships, @@ -569,12 +633,7 @@ where limit: Option, context: &C, ) -> Result { - debug!( - "claim_next_jobs({}, {:?}) - X-Span-ID: {:?}", - id, - limit, - Has::::get(context).0.clone() - ); + log_call!(debug, context, "claim_next_jobs({}, {:?})", id, limit); authorize_workflow!(self, id, context, ClaimNextJobsResponse); @@ -625,11 +684,12 @@ where failed_only: Option, context: &C, ) -> Result { - info!( - "reset_job_status(workflow_id={}, failed_only={:?}) - X-Span-ID: {:?}", + log_call!( + info, + context, + "reset_job_status(workflow_id={}, failed_only={:?})", id, failed_only, - Has::::get(context).0.clone() ); 
authorize_workflow!(self, id, context, ResetJobStatusResponse); @@ -641,25 +701,17 @@ where .await?; if let ResetJobStatusResponse::SuccessfulResponse(ref response) = result { - let auth: Option = Has::>::get(context).clone(); - let username = auth - .map(|a| a.subject) - .unwrap_or_else(|| "unknown".to_string()); - - let event = models::EventModel::new( + self.record_user_action_event( id, + "reset_job_status", serde_json::json!({ - "category": "user_action", - "action": "reset_job_status", - "user": username, "workflow_id": id, "failed_only": failed_only_value, "updated_count": response.updated_count, }), - ); - if let Err(e) = self.events_api.create_event(event, context).await { - error!("Failed to create event for reset_job_status: {:?}", e); - } + context, + ) + .await; } Ok(result) @@ -672,12 +724,13 @@ where run_id: i64, context: &C, ) -> Result { - debug!( - "manage_status_change({}, {:?}, {}) - X-Span-ID: {:?}", + log_call!( + debug, + context, + "manage_status_change({}, {:?}, {})", id, status, run_id, - Has::::get(context).0.clone() ); if status.is_complete() { @@ -685,14 +738,13 @@ where "manage_status_change: cannot set completion status '{}' for job_id={}. Use complete_job instead.", status, id ); - let error_response = models::ErrorResponse::new(serde_json::json!({ - "message": format!( - "Cannot set completion status '{}' via manage_status_change. Use complete_job API instead.", - status - ) - })); return Ok( - ManageStatusChangeResponse::UnprocessableContentErrorResponse(error_response), + ManageStatusChangeResponse::UnprocessableContentErrorResponse( + message_error_response(format!( + "Cannot set completion status '{}' via manage_status_change. 
Use complete_job API instead.", + status + )), + ), ); } @@ -726,9 +778,10 @@ where if let Err(e) = self.validate_run_id(job.workflow_id, run_id).await { error!("manage_status_change: job_id={}, {}", id, e); - let error_response = models::ErrorResponse::new(serde_json::json!({ "message": e })); return Ok( - ManageStatusChangeResponse::UnprocessableContentErrorResponse(error_response), + ManageStatusChangeResponse::UnprocessableContentErrorResponse( + message_error_response(e), + ), ); } @@ -759,22 +812,18 @@ where })?; if exists.is_none() { - let error_response = models::ErrorResponse::new(serde_json::json!({ - "message": format!("Job not found with ID: {}", id) - })); return Ok(ManageStatusChangeResponse::NotFoundErrorResponse( - error_response, + resource_not_found_response("Job", id), )); } - let error_response = models::ErrorResponse::new(serde_json::json!({ - "message": format!( - "Job {} status was concurrently modified (expected '{}'), please retry", - id, current_status - ) - })); return Ok( - ManageStatusChangeResponse::UnprocessableContentErrorResponse(error_response), + ManageStatusChangeResponse::UnprocessableContentErrorResponse( + message_error_response(format!( + "job_id={} status was concurrently modified (expected status='{}'), please retry", + id, current_status + )), + ), ); } @@ -796,11 +845,8 @@ where "Failed to reinitialize downstream jobs for job {}: {}", id, e ); - let error_response = models::ErrorResponse::new(serde_json::json!({ - "message": "Failed to reinitialize downstream jobs" - })); return Ok(ManageStatusChangeResponse::DefaultErrorResponse( - error_response, + message_error_response("Failed to reinitialize downstream jobs"), )); } @@ -819,30 +865,24 @@ where compute_node_id: i64, context: &C, ) -> Result { - debug!( - "start_job({}, {}, {}) - X-Span-ID: {:?}", + log_call!( + debug, + context, + "start_job({}, {}, {})", id, run_id, compute_node_id, - Has::::get(context).0.clone() ); authorize_job!(self, id, context, 
StartJobResponse); - let mut job = match self.jobs_api.get_job(id, context).await? { - GetJobResponse::SuccessfulResponse(job) => job, - GetJobResponse::ForbiddenErrorResponse(err) => { - error!("Access denied for job {}: {:?}", id, err); - return Ok(StartJobResponse::ForbiddenErrorResponse(err)); - } - GetJobResponse::NotFoundErrorResponse(err) => { - error!("Job not found {}: {:?}", id, err); - return Ok(StartJobResponse::NotFoundErrorResponse(err)); - } - GetJobResponse::DefaultErrorResponse(err) => { - error!("Failed to get job {}: {:?}", id, err); - return Ok(StartJobResponse::DefaultErrorResponse(err)); - } + let mut job = match translate_get_job_response!( + self.jobs_api.get_job(id, context).await?, + id, + StartJobResponse + ) { + Ok(job) => job, + Err(err_response) => return Ok(err_response), }; match job.status { Some(models::JobStatus::Pending) => { @@ -854,14 +894,14 @@ where id, status ); return Err(ApiError(format!( - "Job {} has invalid status {:?}. Expected SubmittedPending for job start.", + "job_id={} has invalid status={:?}. Expected SubmittedPending for job start.", id, status ))); } None => { error!("start_job: Job status not set for job_id={}", id); return Err(ApiError(format!( - "Job {} has no status set. Expected SubmittedPending for job start.", + "job_id={} has no status set. 
Expected SubmittedPending for job start.", id ))); } @@ -869,9 +909,8 @@ where if let Err(e) = self.validate_run_id(job.workflow_id, run_id).await { error!("start_job: job_id={}, {}", id, e); - let error_response = models::ErrorResponse::new(serde_json::json!({ "message": e })); return Ok(StartJobResponse::UnprocessableContentErrorResponse( - error_response, + message_error_response(e), )); } @@ -896,7 +935,7 @@ where id ); return Err(ApiError(format!( - "Job {} status was concurrently modified, cannot start", + "job_id={} status was concurrently modified, cannot start", id ))); } @@ -948,13 +987,14 @@ where result: models::ResultModel, context: &C, ) -> Result { - debug!( - "complete_job({}, {:?}, {}, {:?}) - X-Span-ID: {:?}", + log_call!( + debug, + context, + "complete_job({}, {:?}, {}, {:?})", id, status, run_id, result, - Has::::get(context).0.clone() ); authorize_job!(self, id, context, CompleteJobResponse); @@ -995,25 +1035,10 @@ where } let mut job = match self.jobs_api.get_job(id, context).await { - Ok(response) => match response { - GetJobResponse::SuccessfulResponse(job) => job, - GetJobResponse::ForbiddenErrorResponse(err) => { - error!("Access denied for job {}: {:?}", id, err); - return Err(CompletionMutationError::Response(Box::new( - CompleteJobResponse::ForbiddenErrorResponse(err), - ))); - } - GetJobResponse::NotFoundErrorResponse(err) => { - error!("Job not found {}: {:?}", id, err); - return Err(CompletionMutationError::Response(Box::new( - CompleteJobResponse::NotFoundErrorResponse(err), - ))); - } - GetJobResponse::DefaultErrorResponse(err) => { - error!("Failed to get job {}: {:?}", id, err); - return Err(CompletionMutationError::Response(Box::new( - CompleteJobResponse::DefaultErrorResponse(err), - ))); + Ok(response) => match translate_get_job_response!(response, id, CompleteJobResponse) { + Ok(job) => job, + Err(err_response) => { + return Err(CompletionMutationError::Response(Box::new(err_response))); } }, Err(error) => return 
Err(CompletionMutationError::Transport(error)), @@ -1022,14 +1047,13 @@ where if let Some(expected_workflow_id) = expected_workflow_id && job.workflow_id != expected_workflow_id { - let error_response = models::ErrorResponse::new(serde_json::json!({ - "message": format!( - "Job {} belongs to workflow {} but batch target is workflow {}", - id, job.workflow_id, expected_workflow_id - ) - })); return Err(CompletionMutationError::Response(Box::new( - CompleteJobResponse::UnprocessableContentErrorResponse(error_response), + CompleteJobResponse::UnprocessableContentErrorResponse(message_error_response( + format!( + "job_id={} belongs to workflow_id={} but batch target is workflow_id={}", + id, job.workflow_id, expected_workflow_id + ), + )), ))); } @@ -1037,57 +1061,22 @@ where && current_status.is_complete() { error!( - "Job {} is already complete with status {:?}", + "job_id={} is already complete with status={:?}", id, current_status ); - let error_response = models::ErrorResponse::new(serde_json::json!({ - "message": format!("Job {} is already complete with status {:?}", id, current_status) - })); return Err(CompletionMutationError::Response(Box::new( - CompleteJobResponse::UnprocessableContentErrorResponse(error_response), + CompleteJobResponse::UnprocessableContentErrorResponse(message_error_response( + format!( + "job_id={} is already complete with status={:?}", + id, current_status + ), + )), ))); } - if result.job_id != id { - let error_response = models::ErrorResponse::new(serde_json::json!({ - "message": format!( - "ResultModel job_id {} does not match target job_id {}", - result.job_id, id - ) - })); - return Err(CompletionMutationError::Response(Box::new( - CompleteJobResponse::UnprocessableContentErrorResponse(error_response), - ))); - } - if result.workflow_id != job.workflow_id { - let error_response = models::ErrorResponse::new(serde_json::json!({ - "message": format!( - "ResultModel workflow_id {} does not match job's workflow_id {}", - 
result.workflow_id, job.workflow_id - ) - })); - return Err(CompletionMutationError::Response(Box::new( - CompleteJobResponse::UnprocessableContentErrorResponse(error_response), - ))); - } - if result.status != status { - let error_response = models::ErrorResponse::new(serde_json::json!({ - "message": format!( - "ResultModel status '{}' does not match target status '{}'", - result.status, status - ) - })); - return Err(CompletionMutationError::Response(Box::new( - CompleteJobResponse::UnprocessableContentErrorResponse(error_response), - ))); - } - if result.run_id != run_id { - let error_response = models::ErrorResponse::new(serde_json::json!({ - "message": format!( - "ResultModel run_id {} does not match target run_id {}", - result.run_id, run_id - ) - })); + if let Err(error_response) = + validate_result_matches_target(id, job.workflow_id, status, run_id, &result) + { return Err(CompletionMutationError::Response(Box::new( CompleteJobResponse::UnprocessableContentErrorResponse(error_response), ))); @@ -1227,11 +1216,10 @@ where { Ok(Some(row)) => row, Ok(None) => { - let error_response = models::ErrorResponse::new(serde_json::json!({ - "message": format!("Job not found with ID: {}", id) - })); return Err(CompletionMutationError::Response(Box::new( - CompleteJobResponse::NotFoundErrorResponse(error_response), + CompleteJobResponse::NotFoundErrorResponse(resource_not_found_response( + "Job", id, + )), ))); } Err(e) => { @@ -1246,83 +1234,40 @@ where let job_command = job_row.command; let status_i32 = i32::try_from(job_row.status).map_err(|e| { CompletionMutationError::Transport(ApiError(format!( - "Job {} has out-of-range status value {} in database: {}", + "job_id={} has out-of-range status value={} in database: {}", id, job_row.status, e ))) })?; - let current_status = match models::JobStatus::from_int(status_i32) { - Ok(s) => s, - Err(e) => { - return Err(CompletionMutationError::Transport(ApiError(format!( - "Failed to parse job status for job {}: {}", - id, e - 
)))); - } - }; + let current_status = + parse_job_status(status_i32, id).map_err(CompletionMutationError::Transport)?; if let Some(expected_workflow_id) = expected_workflow_id && job_workflow_id != expected_workflow_id { - let error_response = models::ErrorResponse::new(serde_json::json!({ - "message": format!( - "Job {} belongs to workflow {} but batch target is workflow {}", - id, job_workflow_id, expected_workflow_id - ) - })); return Err(CompletionMutationError::Response(Box::new( - CompleteJobResponse::UnprocessableContentErrorResponse(error_response), + CompleteJobResponse::UnprocessableContentErrorResponse(message_error_response( + format!( + "job_id={} belongs to workflow_id={} but batch target is workflow_id={}", + id, job_workflow_id, expected_workflow_id + ), + )), ))); } if current_status.is_complete() { - let error_response = models::ErrorResponse::new(serde_json::json!({ - "message": format!("Job {} is already complete with status {:?}", id, current_status) - })); return Err(CompletionMutationError::Response(Box::new( - CompleteJobResponse::UnprocessableContentErrorResponse(error_response), + CompleteJobResponse::UnprocessableContentErrorResponse(message_error_response( + format!( + "job_id={} is already complete with status={:?}", + id, current_status + ), + )), ))); } - if result.job_id != id { - let error_response = models::ErrorResponse::new(serde_json::json!({ - "message": format!( - "ResultModel job_id {} does not match target job_id {}", - result.job_id, id - ) - })); - return Err(CompletionMutationError::Response(Box::new( - CompleteJobResponse::UnprocessableContentErrorResponse(error_response), - ))); - } - if result.workflow_id != job_workflow_id { - let error_response = models::ErrorResponse::new(serde_json::json!({ - "message": format!( - "ResultModel workflow_id {} does not match job's workflow_id {}", - result.workflow_id, job_workflow_id - ) - })); - return Err(CompletionMutationError::Response(Box::new( - 
CompleteJobResponse::UnprocessableContentErrorResponse(error_response), - ))); - } - if result.status != status { - let error_response = models::ErrorResponse::new(serde_json::json!({ - "message": format!( - "ResultModel status '{}' does not match target status '{}'", - result.status, status - ) - })); - return Err(CompletionMutationError::Response(Box::new( - CompleteJobResponse::UnprocessableContentErrorResponse(error_response), - ))); - } - if result.run_id != run_id { - let error_response = models::ErrorResponse::new(serde_json::json!({ - "message": format!( - "ResultModel run_id {} does not match target run_id {}", - result.run_id, run_id - ) - })); + if let Err(error_response) = + validate_result_matches_target(id, job_workflow_id, status, run_id, &result) + { return Err(CompletionMutationError::Response(Box::new( CompleteJobResponse::UnprocessableContentErrorResponse(error_response), ))); @@ -1343,25 +1288,23 @@ where match workflow_run_id_row { Some(row) if row.run_id == run_id => {} Some(row) => { - let error_response = models::ErrorResponse::new(serde_json::json!({ - "message": format!( - "Run ID mismatch: provided {} but workflow status has {}", - run_id, row.run_id - ) - })); return Err(CompletionMutationError::Response(Box::new( - CompleteJobResponse::UnprocessableContentErrorResponse(error_response), + CompleteJobResponse::UnprocessableContentErrorResponse(message_error_response( + format!( + "Run ID mismatch: provided {} but workflow status has {}", + run_id, row.run_id + ), + )), ))); } None => { - let error_response = models::ErrorResponse::new(serde_json::json!({ - "message": format!( - "Workflow status not found for workflow ID: {}", - job_workflow_id - ) - })); return Err(CompletionMutationError::Response(Box::new( - CompleteJobResponse::UnprocessableContentErrorResponse(error_response), + CompleteJobResponse::UnprocessableContentErrorResponse(message_error_response( + format!( + "Workflow status not found for workflow ID: {}", + job_workflow_id 
+ ), + )), ))); } } @@ -1483,21 +1426,23 @@ where let current_status = models::JobStatus::from_int(status_int as i32) .unwrap_or(models::JobStatus::Failed); if current_status.is_complete() { - let error_response = models::ErrorResponse::new(serde_json::json!({ - "message": format!("Job {} is already complete with status {:?}", id, current_status) - })); return Err(CompletionMutationError::Response(Box::new( - CompleteJobResponse::UnprocessableContentErrorResponse(error_response), + CompleteJobResponse::UnprocessableContentErrorResponse( + message_error_response(format!( + "job_id={} is already complete with status={:?}", + id, current_status + )), + ), ))); } return Err(CompletionMutationError::Transport(ApiError(format!( - "Job {} is in unexpected status {:?}", + "job_id={} is in unexpected status={:?}", id, current_status )))); } None => { return Err(CompletionMutationError::Transport(ApiError(format!( - "Job {} not found", + "job_id={} not found", id )))); } @@ -1641,11 +1586,12 @@ where body: models::BatchCompleteJobsRequest, context: &C, ) -> Result { - debug!( - "batch_complete_jobs(workflow_id={}, count={}) - X-Span-ID: {:?}", + log_call!( + debug, + context, + "batch_complete_jobs(workflow_id={}, count={})", workflow_id, body.completions.len(), - Has::::get(context).0.clone() ); authorize_workflow!(self, workflow_id, context, BatchCompleteJobsResponse); @@ -1761,12 +1707,13 @@ where ApiError("Database connection error".to_string()) })?; - debug!( - "get_ready_jobs: workflow_id={}, limit={}, resources={:?} - X-Span-ID: {:?}", + log_call!( + debug, + context, + "get_ready_jobs: workflow_id={}, limit={}, resources={:?}", workflow_id, limit, resources, - Has::::get(context).0.clone() ); // Workflow existence check runs without a transaction. 
WAL mode allows @@ -1781,11 +1728,8 @@ where })?; if workflow_exists.is_none() { - let error_response = models::ErrorResponse::new(serde_json::json!({ - "message": format!("Workflow not found with ID: {}", workflow_id) - })); return Ok(ClaimJobsBasedOnResources::NotFoundErrorResponse( - error_response, + resource_not_found_response("Workflow", workflow_id), )); } diff --git a/src/server/http_server/lifecycle_support.rs b/src/server/http_server/lifecycle_support.rs index e5ee7e7e..10c27442 100644 --- a/src/server/http_server/lifecycle_support.rs +++ b/src/server/http_server/lifecycle_support.rs @@ -1,5 +1,7 @@ use super::*; -use crate::server::api::{begin_immediate, database_error_with_msg, database_lock_aware_error}; +use crate::server::api::{ + begin_immediate, database_error_with_msg, database_lock_aware_error, parse_job_status, +}; impl Server { pub(super) async fn manage_job_status_change( @@ -27,7 +29,7 @@ impl Server { { Ok(Some(row)) => row, Ok(None) => { - error!("Job not found with ID: {}", job_id); + error!("Job not found job_id={}", job_id); return Err(ApiError("Job not found".to_string())); } Err(e) => { @@ -36,16 +38,7 @@ impl Server { } }; - let current_status = match models::JobStatus::from_int(current_job.status as i32) { - Ok(status) => status, - Err(e) => { - error!( - "Failed to parse current job status '{}': {}", - current_job.status, e - ); - return Err(ApiError("Invalid current job status".to_string())); - } - }; + let current_status = parse_job_status(current_job.status as i32, job_id)?; if current_status == *new_status { debug!( @@ -82,10 +75,7 @@ impl Server { }; if result_record.is_none() { - error!( - "No result found for job ID {} and run_id {}", - job_id, run_id - ); + error!("No result found for job_id={} run_id={}", job_id, run_id); return Err(ApiError( "No result found when transitioning to terminal status".to_string(), )); @@ -133,23 +123,23 @@ impl Server { .unwrap_or(models::JobStatus::Failed); if status.is_complete() { debug!( - 
"Job {} already in terminal status {:?}, treating as idempotent success", + "job_id={} already in terminal status={:?}, treating as idempotent success", job_id, status ); return Ok(()); } error!( - "Job {} has unexpected status {:?} after conditional update matched 0 rows", + "job_id={} has unexpected status={:?} after conditional update matched 0 rows", job_id, status ); return Err(ApiError(format!( - "Job {} is in unexpected status {:?}", + "job_id={} is in unexpected status={:?}", job_id, status ))); } None => { - error!("Job {} was deleted during status transition", job_id); - return Err(ApiError(format!("Job {} not found", job_id))); + error!("job_id={} was deleted during status transition", job_id); + return Err(ApiError(format!("job_id={} not found", job_id))); } } } @@ -160,7 +150,7 @@ impl Server { } self.signal_job_completion(); debug!( - "Marked job {} as complete, unblocking will be processed by background task", + "Marked job_id={} as complete, unblocking will be processed by background task", job_id ); } else { @@ -175,7 +165,7 @@ impl Server { Ok(result) => { if result.rows_affected() == 0 { error!( - "No rows affected for job ID {} when updating status", + "No rows affected for job_id={} when updating status", job_id ); return Err(ApiError( diff --git a/src/server/http_server/local_schedulers_transport.rs b/src/server/http_server/local_schedulers_transport.rs index 4e709177..a4492a47 100644 --- a/src/server/http_server/local_schedulers_transport.rs +++ b/src/server/http_server/local_schedulers_transport.rs @@ -42,8 +42,14 @@ where num_cpus: Option, context: &C, ) -> Result { - authorize_workflow!(self, workflow_id, context, ListLocalSchedulersResponse); - let (offset, limit) = process_pagination_params(offset, limit)?; + let (offset, limit) = authorize_workflow_and_paginate!( + self, + workflow_id, + context, + ListLocalSchedulersResponse, + offset, + limit + ); self.schedulers_api .list_local_schedulers( workflow_id, diff --git 
a/src/server/http_server/resource_requirements_transport.rs b/src/server/http_server/resource_requirements_transport.rs index 368a64b0..6f4fdfe6 100644 --- a/src/server/http_server/resource_requirements_transport.rs +++ b/src/server/http_server/resource_requirements_transport.rs @@ -1,5 +1,5 @@ use super::*; -use crate::server::api::{EventsApi, ResourceRequirementsApi}; +use crate::server::api::ResourceRequirementsApi; #[allow(clippy::too_many_arguments)] impl Server @@ -62,8 +62,14 @@ where reverse_sort: Option, context: &C, ) -> Result { - authorize_workflow!(self, workflow_id, context, ListResourceRequirementsResponse); - let (offset, limit) = process_pagination_params(offset, limit)?; + let (offset, limit) = authorize_workflow_and_paginate!( + self, + workflow_id, + context, + ListResourceRequirementsResponse, + offset, + limit + ); self.resource_requirements_api .list_resource_requirements( workflow_id, @@ -88,11 +94,12 @@ where scheduler_config_id: Option, context: &C, ) -> Result { - debug!( - "get_ready_job_requirements({}, {:?}) - X-Span-ID: {:?}", + log_call!( + debug, + context, + "get_ready_job_requirements({}, {:?})", id, scheduler_config_id, - Has::::get(context).0.clone() ); authorize_workflow!(self, id, context, GetReadyJobRequirementsResponse); error!("get_ready_job_requirements operation is not implemented"); @@ -134,17 +141,10 @@ where .await?; if let UpdateResourceRequirementsResponse::SuccessfulResponse(ref rr) = result { - let auth: Option = Has::>::get(context).clone(); - let username = auth - .map(|a| a.subject) - .unwrap_or_else(|| "unknown".to_string()); - - let event = models::EventModel::new( + self.record_user_action_event( rr.workflow_id, + "update_resource_requirements", serde_json::json!({ - "category": "user_action", - "action": "update_resource_requirements", - "user": username, "resource_requirements_id": id, "name": rr.name, "num_cpus": rr.num_cpus, @@ -153,13 +153,9 @@ where "memory": rr.memory, "runtime": rr.runtime, }), - ); - 
if let Err(e) = self.events_api.create_event(event, context).await { - error!( - "Failed to create event for update_resource_requirements: {:?}", - e - ); - } + context, + ) + .await; } Ok(result) diff --git a/src/server/http_server/results_transport.rs b/src/server/http_server/results_transport.rs index 0bfac94b..f19a811b 100644 --- a/src/server/http_server/results_transport.rs +++ b/src/server/http_server/results_transport.rs @@ -37,8 +37,10 @@ where all_runs: Option, context: &C, ) -> Result { - debug!( - "list_results({}, {:?}, {:?}, {:?}, {:?}, compute_node_id={:?}, {:?}, {:?}, {:?}, {:?}, all_runs={:?}) - X-Span-ID: {:?}", + log_call!( + debug, + context, + "list_results({}, {:?}, {:?}, {:?}, {:?}, compute_node_id={:?}, {:?}, {:?}, {:?}, {:?}, all_runs={:?})", workflow_id, job_id, run_id, @@ -50,12 +52,16 @@ where sort_by, reverse_sort, all_runs, - Has::::get(context).0.clone() ); - authorize_workflow!(self, workflow_id, context, ListResultsResponse); - - let (offset, limit) = process_pagination_params(offset, limit)?; + let (offset, limit) = authorize_workflow_and_paginate!( + self, + workflow_id, + context, + ListResultsResponse, + offset, + limit + ); self.results_api .list_results( workflow_id, diff --git a/src/server/http_server/ro_crate_transport.rs b/src/server/http_server/ro_crate_transport.rs index 1de5e646..7de3b420 100644 --- a/src/server/http_server/ro_crate_transport.rs +++ b/src/server/http_server/ro_crate_transport.rs @@ -42,8 +42,14 @@ where reverse_sort: Option, context: &C, ) -> Result { - authorize_workflow!(self, workflow_id, context, ListRoCrateEntitiesResponse); - let (offset, limit) = process_pagination_params(offset, limit)?; + let (offset, limit) = authorize_workflow_and_paginate!( + self, + workflow_id, + context, + ListRoCrateEntitiesResponse, + offset, + limit + ); self.ro_crate_api .list_ro_crate_entities( workflow_id, diff --git a/src/server/http_server/scheduled_compute_nodes_transport.rs 
b/src/server/http_server/scheduled_compute_nodes_transport.rs index 16b9cb6d..81d7f01e 100644 --- a/src/server/http_server/scheduled_compute_nodes_transport.rs +++ b/src/server/http_server/scheduled_compute_nodes_transport.rs @@ -74,13 +74,14 @@ where status: Option, context: &C, ) -> Result { - authorize_workflow!( + let (offset, limit) = authorize_workflow_and_paginate!( self, workflow_id, context, - ListScheduledComputeNodesResponse + ListScheduledComputeNodesResponse, + offset, + limit ); - let (offset, limit) = process_pagination_params(offset, limit)?; self.schedulers_api .list_scheduled_compute_nodes( workflow_id, diff --git a/src/server/http_server/slurm_schedulers_transport.rs b/src/server/http_server/slurm_schedulers_transport.rs index 50ecc923..78c4383e 100644 --- a/src/server/http_server/slurm_schedulers_transport.rs +++ b/src/server/http_server/slurm_schedulers_transport.rs @@ -40,8 +40,14 @@ where reverse_sort: Option, context: &C, ) -> Result { - authorize_workflow!(self, workflow_id, context, ListSlurmSchedulersResponse); - let (offset, limit) = process_pagination_params(offset, limit)?; + let (offset, limit) = authorize_workflow_and_paginate!( + self, + workflow_id, + context, + ListSlurmSchedulersResponse, + offset, + limit + ); self.schedulers_api .list_slurm_schedulers(workflow_id, offset, limit, sort_by, reverse_sort, context) .await diff --git a/src/server/http_server/slurm_stats_transport.rs b/src/server/http_server/slurm_stats_transport.rs index 809905f5..b7d959ae 100644 --- a/src/server/http_server/slurm_stats_transport.rs +++ b/src/server/http_server/slurm_stats_transport.rs @@ -24,15 +24,22 @@ where limit: Option, context: &C, ) -> Result { - authorize_workflow!(self, workflow_id, context, ListSlurmStatsResponse); + let (offset, limit) = authorize_workflow_and_paginate!( + self, + workflow_id, + context, + ListSlurmStatsResponse, + offset, + limit + ); self.slurm_stats_api .list_slurm_stats( workflow_id, job_id, run_id, attempt_id, - 
offset.unwrap_or(0), - limit.unwrap_or(MAX_RECORD_TRANSFER_COUNT), + offset, + limit, context, ) .await diff --git a/src/server/http_server/system_transport.rs b/src/server/http_server/system_transport.rs index fa29e09b..3ec6ce2c 100644 --- a/src/server/http_server/system_transport.rs +++ b/src/server/http_server/system_transport.rs @@ -9,10 +9,7 @@ where &self, context: &C, ) -> Result { - debug!( - "get_version() - X-Span-ID: {:?}", - Has::::get(context).0.clone() - ); + log_call!(debug, context, "get_version()"); if self.authorization_service.enforce_access_control() { Ok(GetVersionResponse::SuccessfulResponse(serde_json::json!({ "version": full_version(), @@ -28,10 +25,7 @@ where } pub(super) async fn transport_ping(&self, context: &C) -> Result { - debug!( - "ping() - X-Span-ID: {:?}", - Has::::get(context).0.clone() - ); + log_call!(debug, context, "ping()"); Ok(PingResponse::SuccessfulResponse( serde_json::json!({"status": "ok"}), )) @@ -41,22 +35,17 @@ where &self, context: &C, ) -> Result { - debug!( - "reload_auth() - X-Span-ID: {:?}", - Has::::get(context).0.clone() - ); + log_call!(debug, context, "reload_auth()"); authorize_admin!(self, context, ReloadAuthResponse); let auth_file_path = match &self.auth_file_path { Some(path) => path.clone(), None => { - return Ok(ReloadAuthResponse::DefaultErrorResponse( - models::ErrorResponse::new(serde_json::json!({ - "error": "NoAuthFile", - "message": "No auth file configured. Start the server with --auth-file to enable auth reloading." - })), - )); + return Ok(ReloadAuthResponse::DefaultErrorResponse(error_payload!( + "NoAuthFile", + "No auth file configured. Start the server with --auth-file to enable auth reloading." 
+ ))); } }; @@ -94,12 +83,10 @@ where } Err(e) => { error!("Failed to reload htpasswd file: {}", e); - Ok(ReloadAuthResponse::DefaultErrorResponse( - models::ErrorResponse::new(serde_json::json!({ - "error": "ReloadFailed", - "message": format!("Failed to reload htpasswd file: {}", e) - })), - )) + Ok(ReloadAuthResponse::DefaultErrorResponse(error_payload!( + "ReloadFailed", + format!("Failed to reload htpasswd file: {}", e) + ))) } } } diff --git a/src/server/http_server/user_data_transport.rs b/src/server/http_server/user_data_transport.rs index e4c0ecf0..d7b45ff1 100644 --- a/src/server/http_server/user_data_transport.rs +++ b/src/server/http_server/user_data_transport.rs @@ -41,9 +41,14 @@ where is_ephemeral: Option, context: &C, ) -> Result { - authorize_workflow!(self, workflow_id, context, ListUserDataResponse); - - let (offset, limit) = process_pagination_params(offset, limit)?; + let (offset, limit) = authorize_workflow_and_paginate!( + self, + workflow_id, + context, + ListUserDataResponse, + offset, + limit + ); self.user_data_api .list_user_data( workflow_id, diff --git a/src/server/http_server/workflows_transport.rs b/src/server/http_server/workflows_transport.rs index b7cb240e..4d1069ff 100644 --- a/src/server/http_server/workflows_transport.rs +++ b/src/server/http_server/workflows_transport.rs @@ -57,8 +57,14 @@ where limit: Option, context: &C, ) -> Result { - authorize_workflow!(self, workflow_id, context, ListFailureHandlersResponse); - let (offset, limit) = process_pagination_params(offset, limit)?; + let (offset, limit) = authorize_workflow_and_paginate!( + self, + workflow_id, + context, + ListFailureHandlersResponse, + offset, + limit + ); self.failure_handlers_api .list_failure_handlers(workflow_id, offset, limit, context) .await @@ -233,11 +239,7 @@ where id: i64, context: &C, ) -> Result { - info!( - "cancel_workflow(workflow_id={}) - X-Span-ID: {:?}", - id, - Has::::get(context).0.clone() - ); + log_call!(info, context, 
"cancel_workflow(workflow_id={})", id); authorize_workflow!(self, id, context, CancelWorkflowResponse); let response = self.workflows_api.cancel_workflow(id, context).await?; Ok(response) @@ -249,11 +251,12 @@ where body: models::ArchiveWorkflowRequest, context: &C, ) -> Result { - info!( - "archive_workflow(workflow_id={}, is_archived={}) - X-Span-ID: {:?}", + log_call!( + info, + context, + "archive_workflow(workflow_id={}, is_archived={})", id, body.is_archived, - Has::::get(context).0.clone() ); authorize_workflow!(self, id, context, ArchiveWorkflowResponse); // When archiving, drop the workflow from the failures cache (mirrors @@ -286,8 +289,14 @@ where after_timestamp: Option, context: &C, ) -> Result { - authorize_workflow!(self, workflow_id, context, ListEventsResponse); - let (offset, limit) = process_pagination_params(offset, limit)?; + let (offset, limit) = authorize_workflow_and_paginate!( + self, + workflow_id, + context, + ListEventsResponse, + offset, + limit + ); self.events_api .list_events( workflow_id, @@ -431,11 +440,7 @@ where id: i64, context: &C, ) -> Result { - info!( - "delete_workflow(workflow_id={}) - X-Span-ID: {:?}", - id, - Has::::get(context).0.clone() - ); + log_call!(info, context, "delete_workflow(workflow_id={})", id); authorize_workflow!(self, id, context, DeleteWorkflowResponse); if let Ok(mut set) = self.workflows_with_failures.write() { set.remove(&id); @@ -449,11 +454,12 @@ where force: Option, context: &C, ) -> Result { - info!( - "reset_workflow_status(workflow_id={}, force={:?}) - X-Span-ID: {:?}", + log_call!( + info, + context, + "reset_workflow_status(workflow_id={}, force={:?})", id, force, - Has::::get(context).0.clone() ); authorize_workflow!(self, id, context, ResetWorkflowStatusResponse); @@ -468,24 +474,16 @@ where .await?; if let ResetWorkflowStatusResponse::SuccessfulResponse(_) = result { - let auth: Option = Has::>::get(context).clone(); - let username = auth - .map(|a| a.subject) - .unwrap_or_else(|| 
"unknown".to_string()); - - let event = models::EventModel::new( + self.record_user_action_event( id, + "reset_workflow_status", serde_json::json!({ - "category": "user_action", - "action": "reset_workflow_status", - "user": username, "workflow_id": id, "force": force_value, }), - ); - if let Err(e) = self.events_api.create_event(event, context).await { - error!("Failed to create event for reset_workflow_status: {:?}", e); - } + context, + ) + .await; } Ok(result) @@ -498,13 +496,14 @@ where strict_scheduler_match: Option, context: &C, ) -> Result { - debug!( - "claim_jobs_based_on_resources({}, {:?}, {:?}, strict_scheduler_match={:?}) - X-Span-ID: {:?}", + log_call!( + debug, + context, + "claim_jobs_based_on_resources({}, {:?}, {:?}, strict_scheduler_match={:?})", id, body, limit, strict_scheduler_match, - Has::::get(context).0.clone() ); authorize_workflow!(self, id, context, ClaimJobsBasedOnResources); diff --git a/src/server/http_transport/path_parsing.rs b/src/server/http_transport/path_parsing.rs index d2bedf4d..b127e561 100644 --- a/src/server/http_transport/path_parsing.rs +++ b/src/server/http_transport/path_parsing.rs @@ -6,6 +6,18 @@ pub(super) fn decode_path_segment(segment: &str) -> Option { .map(|value| value.into_owned()) } +/// Strip `prefix` from the start of `path` and `suffix` from the end, returning the segment in +/// between only when it is a single path segment (i.e. contains no `/`). This is the shared +/// shape behind the `/torc-service/v1//{id}/` helpers below. 
+#[cfg(test)] +fn strip_prefix_and_suffix<'a>(path: &'a str, prefix: &str, suffix: &str) -> Option<&'a str> { + let middle = path.strip_prefix(prefix)?.strip_suffix(suffix)?; + if middle.contains('/') { + return None; + } + Some(middle) +} + #[cfg(test)] pub(super) fn parse_group_member_path(path: &str) -> Option<(i64, String)> { let suffix = path.strip_prefix("/torc-service/v1/access_groups/")?; @@ -18,32 +30,20 @@ pub(super) fn parse_group_member_path(path: &str) -> Option<(i64, String)> { #[cfg(test)] pub(super) fn parse_access_group_members_collection_path(path: &str) -> Option { - let group_id = path.strip_prefix("/torc-service/v1/access_groups/")?; - let group_id = group_id.strip_suffix("/members")?; - if group_id.contains('/') { - return None; - } - group_id.parse::().ok() + strip_prefix_and_suffix(path, "/torc-service/v1/access_groups/", "/members")? + .parse::() + .ok() } #[cfg(test)] pub(super) fn parse_user_groups_path(path: &str) -> Option { - let user_name = path.strip_prefix("/torc-service/v1/users/")?; - let user_name = user_name.strip_suffix("/groups")?; - if user_name.contains('/') { - return None; - } + let user_name = strip_prefix_and_suffix(path, "/torc-service/v1/users/", "/groups")?; decode_path_segment(user_name) } #[cfg(test)] pub(super) fn parse_workflow_access_groups_collection_path(path: &str) -> Option { - let workflow_id = path.strip_prefix("/torc-service/v1/workflows/")?; - let workflow_id = workflow_id.strip_suffix("/access_groups")?; - if workflow_id.contains('/') { - return None; - } - workflow_id.parse::().ok() + parse_workflow_suffix_path(path, "/access_groups") } #[cfg(test)] @@ -71,22 +71,14 @@ pub(super) fn parse_access_check_path(path: &str) -> Option<(i64, String)> { #[cfg(test)] pub(super) fn parse_workflow_failure_handlers_path(path: &str) -> Option { - let workflow_id = path.strip_prefix("/torc-service/v1/workflows/")?; - let workflow_id = workflow_id.strip_suffix("/failure_handlers")?; - if workflow_id.contains('/') { - 
return None; - } - workflow_id.parse::().ok() + parse_workflow_suffix_path(path, "/failure_handlers") } #[cfg(test)] pub(super) fn parse_workflow_suffix_path(path: &str, suffix: &str) -> Option { - let workflow_id = path.strip_prefix("/torc-service/v1/workflows/")?; - let workflow_id = workflow_id.strip_suffix(suffix)?; - if workflow_id.contains('/') { - return None; - } - workflow_id.parse::().ok() + strip_prefix_and_suffix(path, "/torc-service/v1/workflows/", suffix)? + .parse::() + .ok() } #[cfg(test)] diff --git a/src/server/http_transport/query_parsing.rs b/src/server/http_transport/query_parsing.rs index e719eaec..1c261f40 100644 --- a/src/server/http_transport/query_parsing.rs +++ b/src/server/http_transport/query_parsing.rs @@ -206,18 +206,14 @@ pub(super) struct ResetWorkflowStatusQuery { pub(super) fn parse_delete_compute_nodes_query( query: Option<&str>, ) -> Result { - let params: HashMap = form_urlencoded::parse(query.unwrap_or("").as_bytes()) - .into_owned() - .collect(); + let params = parse_params(query); Ok(DeleteComputeNodesQuery { workflow_id: parse_required_i64(¶ms, "workflow_id")?, }) } pub(super) fn parse_compute_nodes_query(query: Option<&str>) -> Result { - let params: HashMap = form_urlencoded::parse(query.unwrap_or("").as_bytes()) - .into_owned() - .collect(); + let params = parse_params(query); Ok(ComputeNodesQuery { workflow_id: parse_required_i64(¶ms, "workflow_id")?, @@ -232,9 +228,7 @@ pub(super) fn parse_compute_nodes_query(query: Option<&str>) -> Result) -> Result { - let params: HashMap = form_urlencoded::parse(query.unwrap_or("").as_bytes()) - .into_owned() - .collect(); + let params = parse_params(query); let workflow_id = parse_required_i64(¶ms, "workflow_id")?; Ok(EventsQuery { @@ -249,9 +243,7 @@ pub(super) fn parse_events_query(query: Option<&str>) -> Result) -> Result { - let params: HashMap = form_urlencoded::parse(query.unwrap_or("").as_bytes()) - .into_owned() - .collect(); + let params = parse_params(query); let 
workflow_id = parse_required_i64(¶ms, "workflow_id")?; Ok(FilesQuery { @@ -270,9 +262,7 @@ pub(super) fn parse_files_query(query: Option<&str>) -> Result, ) -> Result { - let params: HashMap = form_urlencoded::parse(query.unwrap_or("").as_bytes()) - .into_owned() - .collect(); + let params = parse_params(query); let workflow_id = parse_required_i64(¶ms, "workflow_id")?; Ok(LocalSchedulersQuery { @@ -287,9 +277,7 @@ pub(super) fn parse_local_schedulers_query( } pub(super) fn parse_results_query(query: Option<&str>) -> Result { - let params: HashMap = form_urlencoded::parse(query.unwrap_or("").as_bytes()) - .into_owned() - .collect(); + let params = parse_params(query); let workflow_id = parse_required_i64(¶ms, "workflow_id")?; Ok(ResultsQuery { @@ -308,9 +296,7 @@ pub(super) fn parse_results_query(query: Option<&str>) -> Result) -> Result { - let params: HashMap = form_urlencoded::parse(query.unwrap_or("").as_bytes()) - .into_owned() - .collect(); + let params = parse_params(query); let workflow_id = parse_required_i64(¶ms, "workflow_id")?; Ok(UserDataQuery { @@ -329,9 +315,7 @@ pub(super) fn parse_user_data_query(query: Option<&str>) -> Result, ) -> Result { - let params: HashMap = form_urlencoded::parse(query.unwrap_or("").as_bytes()) - .into_owned() - .collect(); + let params = parse_params(query); Ok(UserDataCreateQuery { consumer_job_id: parse_optional_i64(¶ms, "consumer_job_id")?, producer_job_id: parse_optional_i64(¶ms, "producer_job_id")?, @@ -341,9 +325,7 @@ pub(super) fn parse_user_data_create_query( pub(super) fn parse_scheduled_compute_nodes_query( query: Option<&str>, ) -> Result { - let params: HashMap = form_urlencoded::parse(query.unwrap_or("").as_bytes()) - .into_owned() - .collect(); + let params = parse_params(query); Ok(ScheduledComputeNodesQuery { workflow_id: parse_required_i64(¶ms, "workflow_id")?, offset: parse_optional_i64(¶ms, "offset")?, @@ -359,9 +341,7 @@ pub(super) fn parse_scheduled_compute_nodes_query( pub(super) fn 
parse_slurm_schedulers_query( query: Option<&str>, ) -> Result { - let params: HashMap = form_urlencoded::parse(query.unwrap_or("").as_bytes()) - .into_owned() - .collect(); + let params = parse_params(query); Ok(SlurmSchedulersQuery { workflow_id: parse_required_i64(¶ms, "workflow_id")?, offset: parse_optional_i64(¶ms, "offset")?, @@ -374,9 +354,7 @@ pub(super) fn parse_slurm_schedulers_query( pub(super) fn parse_access_pagination_query( query: Option<&str>, ) -> Result { - let params: HashMap = form_urlencoded::parse(query.unwrap_or("").as_bytes()) - .into_owned() - .collect(); + let params = parse_params(query); Ok(AccessPaginationQuery { offset: parse_optional_i64(¶ms, "offset")?, limit: parse_optional_i64(¶ms, "limit")?, @@ -386,9 +364,7 @@ pub(super) fn parse_access_pagination_query( pub(super) fn parse_resource_requirements_query( query: Option<&str>, ) -> Result { - let params: HashMap = form_urlencoded::parse(query.unwrap_or("").as_bytes()) - .into_owned() - .collect(); + let params = parse_params(query); Ok(ResourceRequirementsQuery { workflow_id: parse_required_i64(¶ms, "workflow_id")?, job_id: parse_optional_i64(¶ms, "job_id")?, @@ -406,9 +382,7 @@ pub(super) fn parse_resource_requirements_query( } pub(super) fn parse_slurm_stats_query(query: Option<&str>) -> Result { - let params: HashMap = form_urlencoded::parse(query.unwrap_or("").as_bytes()) - .into_owned() - .collect(); + let params = parse_params(query); Ok(SlurmStatsQuery { workflow_id: parse_required_i64(¶ms, "workflow_id")?, job_id: parse_optional_i64(¶ms, "job_id")?, @@ -420,9 +394,7 @@ pub(super) fn parse_slurm_stats_query(query: Option<&str>) -> Result) -> Result { - let params: HashMap = form_urlencoded::parse(query.unwrap_or("").as_bytes()) - .into_owned() - .collect(); + let params = parse_params(query); Ok(WorkflowsQuery { offset: parse_optional_i64(¶ms, "offset")?, sort_by: params.get("sort_by").cloned(), @@ -438,9 +410,7 @@ pub(super) fn parse_workflows_query(query: Option<&str>) -> 
Result, ) -> Result { - let params: HashMap = form_urlencoded::parse(query.unwrap_or("").as_bytes()) - .into_owned() - .collect(); + let params = parse_params(query); Ok(WorkflowRelationshipsQuery { offset: parse_optional_i64(¶ms, "offset")?, limit: parse_optional_i64(¶ms, "limit")?, @@ -467,9 +437,7 @@ pub(super) fn parse_pending_actions_query( pub(super) fn parse_initialize_jobs_query( query: Option<&str>, ) -> Result { - let params: HashMap = form_urlencoded::parse(query.unwrap_or("").as_bytes()) - .into_owned() - .collect(); + let params = parse_params(query); Ok(InitializeJobsQuery { only_uninitialized: parse_optional_bool(¶ms, "only_uninitialized")?, clear_ephemeral_user_data: parse_optional_bool(¶ms, "clear_ephemeral_user_data")?, @@ -480,9 +448,7 @@ pub(super) fn parse_initialize_jobs_query( pub(super) fn parse_claim_jobs_based_on_resources_query( query: Option<&str>, ) -> Result { - let params: HashMap = form_urlencoded::parse(query.unwrap_or("").as_bytes()) - .into_owned() - .collect(); + let params = parse_params(query); Ok(ClaimJobsBasedOnResourcesQuery { strict_scheduler_match: parse_optional_bool(¶ms, "strict_scheduler_match")?, }) @@ -491,9 +457,7 @@ pub(super) fn parse_claim_jobs_based_on_resources_query( pub(super) fn parse_claim_next_jobs_query( query: Option<&str>, ) -> Result { - let params: HashMap = form_urlencoded::parse(query.unwrap_or("").as_bytes()) - .into_owned() - .collect(); + let params = parse_params(query); Ok(ClaimNextJobsQuery { limit: parse_optional_i64(¶ms, "limit")?, }) @@ -502,9 +466,7 @@ pub(super) fn parse_claim_next_jobs_query( pub(super) fn parse_process_changed_job_inputs_query( query: Option<&str>, ) -> Result { - let params: HashMap = form_urlencoded::parse(query.unwrap_or("").as_bytes()) - .into_owned() - .collect(); + let params = parse_params(query); Ok(ProcessChangedJobInputsQuery { dry_run: parse_optional_bool(¶ms, "dry_run")?, }) @@ -513,9 +475,7 @@ pub(super) fn parse_process_changed_job_inputs_query( pub(super) 
fn parse_get_ready_job_requirements_query( query: Option<&str>, ) -> Result { - let params: HashMap = form_urlencoded::parse(query.unwrap_or("").as_bytes()) - .into_owned() - .collect(); + let params = parse_params(query); Ok(GetReadyJobRequirementsQuery { scheduler_config_id: parse_optional_i64(¶ms, "scheduler_config_id")?, }) @@ -524,9 +484,7 @@ pub(super) fn parse_get_ready_job_requirements_query( pub(super) fn parse_reset_job_status_query( query: Option<&str>, ) -> Result { - let params: HashMap = form_urlencoded::parse(query.unwrap_or("").as_bytes()) - .into_owned() - .collect(); + let params = parse_params(query); Ok(ResetJobStatusQuery { failed_only: parse_optional_bool(¶ms, "failed_only")?, }) @@ -535,14 +493,23 @@ pub(super) fn parse_reset_job_status_query( pub(super) fn parse_reset_workflow_status_query( query: Option<&str>, ) -> Result { - let params: HashMap = form_urlencoded::parse(query.unwrap_or("").as_bytes()) - .into_owned() - .collect(); + let params = parse_params(query); Ok(ResetWorkflowStatusQuery { force: parse_optional_bool(¶ms, "force")?, }) } +/// Decode a percent-encoded query string into a `HashMap` of (key, value) pairs. +/// +/// Used by every `parse_*_query` helper as the first step. Behaves identically to the inline +/// `form_urlencoded::parse(query.unwrap_or("").as_bytes()).into_owned().collect()` it replaces: +/// missing query string -> empty map; repeated keys -> last value wins. +pub(super) fn parse_params(query: Option<&str>) -> HashMap { + form_urlencoded::parse(query.unwrap_or("").as_bytes()) + .into_owned() + .collect() +} + pub(super) fn parse_required_i64( params: &HashMap, key: &str, diff --git a/src/server/http_transport/response_mapping.rs b/src/server/http_transport/response_mapping.rs index 448f2674..4cb1c4eb 100644 --- a/src/server/http_transport/response_mapping.rs +++ b/src/server/http_transport/response_mapping.rs @@ -1,77 +1,28 @@ use super::*; -macro_rules! 
map_response_std { - ($name:ident, $ty:path, $success:ident) => { - pub(crate) fn $name(response: $ty) -> Response { - use $ty::*; - match response { - $success(body) => json_response_with_status(&body, StatusCode::OK), - ForbiddenErrorResponse(body) => { - json_response_with_status(&body, StatusCode::FORBIDDEN) - } - NotFoundErrorResponse(body) => { - json_response_with_status(&body, StatusCode::NOT_FOUND) - } - DefaultErrorResponse(body) => { - json_response_with_status(&body, StatusCode::INTERNAL_SERVER_ERROR) - } - } - } - }; -} - -macro_rules! map_response_not_found_only { - ($name:ident, $ty:path, $success:ident) => { - pub(crate) fn $name(response: $ty) -> Response { - use $ty::*; - match response { - $success(body) => json_response_with_status(&body, StatusCode::OK), - NotFoundErrorResponse(body) => { - json_response_with_status(&body, StatusCode::NOT_FOUND) - } - DefaultErrorResponse(body) => { - json_response_with_status(&body, StatusCode::INTERNAL_SERVER_ERROR) - } - } - } - }; -} - -macro_rules! map_response_conflict { - ($name:ident, $ty:path, $success:ident, $conflict:ident) => { - pub(crate) fn $name(response: $ty) -> Response { - use $ty::*; - match response { - $success(body) => json_response_with_status(&body, StatusCode::OK), - ForbiddenErrorResponse(body) => { - json_response_with_status(&body, StatusCode::FORBIDDEN) - } - NotFoundErrorResponse(body) => { - json_response_with_status(&body, StatusCode::NOT_FOUND) - } - $conflict(body) => json_response_with_status(&body, StatusCode::CONFLICT), - DefaultErrorResponse(body) => { - json_response_with_status(&body, StatusCode::INTERNAL_SERVER_ERROR) - } - } - } - }; -} - -macro_rules! map_response_accepted_conflict { - ($name:ident, $ty:path, $success:ident, $accepted:ident, $conflict:ident) => { +/// Define a `pub(crate) fn $name(response: $ty) -> Response` that maps a +/// transport response enum onto an HTTP response. 
+/// +/// Every `*Response` enum here shares a `Forbidden / NotFound / Default` tail, +/// so this macro hard-codes those three terminal arms and lets call sites +/// declare just the success variant plus any extra arms (Conflict, Accepted, +/// Unprocessable, etc.) as `Variant => StatusCode::XYZ` pairs. +/// +/// Use [`map_response_no_forbidden!`] for the rare enums (currently `GetTask*`) +/// that omit the Forbidden variant. +macro_rules! map_response { + ($name:ident, $ty:path, $success:ident $(, $extra_variant:ident => $extra_status:expr)* $(,)?) => { pub(crate) fn $name(response: $ty) -> Response { use $ty::*; match response { $success(body) => json_response_with_status(&body, StatusCode::OK), - $accepted(body) => json_response_with_status(&body, StatusCode::ACCEPTED), + $($extra_variant(body) => json_response_with_status(&body, $extra_status),)* ForbiddenErrorResponse(body) => { json_response_with_status(&body, StatusCode::FORBIDDEN) } NotFoundErrorResponse(body) => { json_response_with_status(&body, StatusCode::NOT_FOUND) } - $conflict(body) => json_response_with_status(&body, StatusCode::CONFLICT), DefaultErrorResponse(body) => { json_response_with_status(&body, StatusCode::INTERNAL_SERVER_ERROR) } @@ -80,21 +31,17 @@ macro_rules! map_response_accepted_conflict { }; } -macro_rules! map_response_unprocessable { +/// Variant of [`map_response!`] for response enums whose contract does not +/// include `ForbiddenErrorResponse` (e.g. `GetTaskResponse`). +macro_rules! 
map_response_no_forbidden { ($name:ident, $ty:path, $success:ident) => { pub(crate) fn $name(response: $ty) -> Response { use $ty::*; match response { $success(body) => json_response_with_status(&body, StatusCode::OK), - ForbiddenErrorResponse(body) => { - json_response_with_status(&body, StatusCode::FORBIDDEN) - } NotFoundErrorResponse(body) => { json_response_with_status(&body, StatusCode::NOT_FOUND) } - UnprocessableContentErrorResponse(body) => { - json_response_with_status(&body, StatusCode::UNPROCESSABLE_ENTITY) - } DefaultErrorResponse(body) => { json_response_with_status(&body, StatusCode::INTERNAL_SERVER_ERROR) } @@ -103,546 +50,557 @@ macro_rules! map_response_unprocessable { }; } -map_response_std!( +map_response!( list_compute_nodes_response, ListComputeNodesResponse, SuccessfulResponse ); -map_response_std!( +map_response!( create_compute_node_response, CreateComputeNodeResponse, SuccessfulResponse ); -map_response_std!( +map_response!( create_event_response, CreateEventResponse, SuccessfulResponse ); -map_response_std!(create_file_response, CreateFileResponse, SuccessfulResponse); -map_response_std!( +map_response!(create_file_response, CreateFileResponse, SuccessfulResponse); +map_response!( create_local_scheduler_response, CreateLocalSchedulerResponse, SuccessfulResponse ); -map_response_std!( +map_response!( create_result_response, CreateResultResponse, SuccessfulResponse ); -map_response_std!( +map_response!( create_user_data_response, CreateUserDataResponse, SuccessfulResponse ); -map_response_std!( +map_response!( create_scheduled_compute_node_response, CreateScheduledComputeNodeResponse, SuccessfulResponse ); -map_response_std!( +map_response!( create_slurm_scheduler_response, CreateSlurmSchedulerResponse, SuccessfulResponse ); -map_response_conflict!( +map_response!( create_access_group_response, CreateAccessGroupResponse, SuccessfulResponse, - ConflictErrorResponse + ConflictErrorResponse => StatusCode::CONFLICT ); 
-map_response_unprocessable!(create_jobs_response, CreateJobsResponse, SuccessfulResponse); -map_response_std!( +map_response!(create_jobs_response, CreateJobsResponse, SuccessfulResponse, UnprocessableContentErrorResponse => StatusCode::UNPROCESSABLE_ENTITY); +map_response!( create_failure_handler_response, CreateFailureHandlerResponse, SuccessfulResponse ); -map_response_unprocessable!( +map_response!( create_resource_requirements_response, CreateResourceRequirementsResponse, - SuccessfulResponse + SuccessfulResponse, + UnprocessableContentErrorResponse => StatusCode::UNPROCESSABLE_ENTITY ); -map_response_std!( +map_response!( create_slurm_stats_response, CreateSlurmStatsResponse, SuccessfulResponse ); -map_response_unprocessable!( +map_response!( create_ro_crate_entity_response, CreateRoCrateEntityResponse, - SuccessfulResponse + SuccessfulResponse, + UnprocessableContentErrorResponse => StatusCode::UNPROCESSABLE_ENTITY ); -map_response_std!( +map_response!( create_remote_workers_response, CreateRemoteWorkersResponse, SuccessfulResponse ); -map_response_std!( +map_response!( update_compute_node_response, UpdateComputeNodeResponse, SuccessfulResponse ); -map_response_std!( +map_response!( update_event_response, UpdateEventResponse, SuccessfulResponse ); -map_response_std!(update_file_response, UpdateFileResponse, SuccessfulResponse); -map_response_std!( +map_response!(update_file_response, UpdateFileResponse, SuccessfulResponse); +map_response!( update_local_scheduler_response, UpdateLocalSchedulerResponse, SuccessfulResponse ); -map_response_std!( +map_response!( update_result_response, UpdateResultResponse, SuccessfulResponse ); -map_response_std!( +map_response!( update_user_data_response, UpdateUserDataResponse, SuccessfulResponse ); -map_response_std!( +map_response!( update_scheduled_compute_node_response, UpdateScheduledComputeNodeResponse, ScheduledComputeNodeUpdatedInTheTable ); -map_response_std!( +map_response!( update_slurm_scheduler_response, 
UpdateSlurmSchedulerResponse, SuccessfulResponse ); -map_response_unprocessable!( +map_response!( update_resource_requirements_response, UpdateResourceRequirementsResponse, - SuccessfulResponse + SuccessfulResponse, + UnprocessableContentErrorResponse => StatusCode::UNPROCESSABLE_ENTITY ); -map_response_std!( +map_response!( delete_compute_nodes_response, DeleteComputeNodesResponse, SuccessfulResponse ); -map_response_std!( +map_response!( delete_events_response, DeleteEventsResponse, SuccessfulResponse ); -map_response_std!( +map_response!( delete_files_response, DeleteFilesResponse, SuccessfulResponse ); -map_response_std!( +map_response!( delete_local_schedulers_response, DeleteLocalSchedulersResponse, SuccessfulResponse ); -map_response_std!( +map_response!( delete_results_response, DeleteResultsResponse, SuccessfulResponse ); -map_response_std!( +map_response!( delete_all_user_data_response, DeleteAllUserDataResponse, SuccessfulResponse ); -map_response_std!( +map_response!( delete_scheduled_compute_nodes_response, DeleteScheduledComputeNodesResponse, SuccessfulResponse ); -map_response_std!( +map_response!( delete_slurm_schedulers_response, DeleteSlurmSchedulersResponse, Message ); -map_response_std!( +map_response!( delete_access_group_response, DeleteAccessGroupResponse, SuccessfulResponse ); -map_response_std!( +map_response!( delete_all_resource_requirements_response, DeleteAllResourceRequirementsResponse, SuccessfulResponse ); -map_response_std!( +map_response!( delete_failure_handler_response, DeleteFailureHandlerResponse, SuccessfulResponse ); -map_response_std!( +map_response!( delete_resource_requirements_response, DeleteResourceRequirementsResponse, SuccessfulResponse ); -map_response_std!( +map_response!( delete_ro_crate_entity_response, DeleteRoCrateEntityResponse, SuccessfulResponse ); -map_response_std!( +map_response!( delete_ro_crate_entities_response, DeleteRoCrateEntitiesResponse, SuccessfulResponse ); -map_response_std!( +map_response!( 
delete_remote_worker_response, DeleteRemoteWorkerResponse, SuccessfulResponse ); -map_response_std!(list_events_response, ListEventsResponse, SuccessfulResponse); -map_response_std!(list_files_response, ListFilesResponse, SuccessfulResponse); -map_response_std!( +map_response!(list_events_response, ListEventsResponse, SuccessfulResponse); +map_response!(list_files_response, ListFilesResponse, SuccessfulResponse); +map_response!( list_local_schedulers_response, ListLocalSchedulersResponse, HTTP ); -map_response_std!( +map_response!( list_results_response, ListResultsResponse, SuccessfulResponse ); -map_response_std!( +map_response!( list_user_data_response, ListUserDataResponse, SuccessfulResponse ); -map_response_std!( +map_response!( list_scheduled_compute_nodes_response, ListScheduledComputeNodesResponse, SuccessfulResponse ); -map_response_std!( +map_response!( list_slurm_schedulers_response, ListSlurmSchedulersResponse, SuccessfulResponse ); -map_response_std!( +map_response!( list_access_groups_response, ListAccessGroupsApiResponse, SuccessfulResponse ); -map_response_std!( +map_response!( list_group_members_response, ListGroupMembersResponse, SuccessfulResponse ); -map_response_std!( +map_response!( list_user_groups_response, ListUserGroupsApiResponse, SuccessfulResponse ); -map_response_std!( +map_response!( list_workflow_groups_response, ListWorkflowGroupsResponse, SuccessfulResponse ); -map_response_std!( +map_response!( list_failure_handlers_response, ListFailureHandlersResponse, SuccessfulResponse ); -map_response_std!( +map_response!( list_resource_requirements_response, ListResourceRequirementsResponse, SuccessfulResponse ); -map_response_std!( +map_response!( list_slurm_stats_response, ListSlurmStatsResponse, SuccessfulResponse ); -map_response_std!( +map_response!( list_ro_crate_entities_response, ListRoCrateEntitiesResponse, SuccessfulResponse ); -map_response_std!( +map_response!( list_remote_workers_response, ListRemoteWorkersResponse, 
SuccessfulResponse ); -map_response_std!( +map_response!( get_compute_node_response, GetComputeNodeResponse, SuccessfulResponse ); -map_response_std!( +map_response!( delete_compute_node_response, DeleteComputeNodeResponse, SuccessfulResponse ); -map_response_std!( +map_response!( delete_event_response, DeleteEventResponse, SuccessfulResponse ); -map_response_std!(delete_file_response, DeleteFileResponse, SuccessfulResponse); -map_response_std!( +map_response!(delete_file_response, DeleteFileResponse, SuccessfulResponse); +map_response!( delete_local_scheduler_response, DeleteLocalSchedulerResponse, LocalComputeNodeConfigurationStoredInTheTable ); -map_response_std!( +map_response!( delete_result_response, DeleteResultResponse, SuccessfulResponse ); -map_response_std!( +map_response!( delete_user_data_response, DeleteUserDataResponse, SuccessfulResponse ); -map_response_std!( +map_response!( delete_scheduled_compute_node_response, DeleteScheduledComputeNodeResponse, SuccessfulResponse ); -map_response_std!( +map_response!( delete_slurm_scheduler_response, DeleteSlurmSchedulerResponse, SuccessfulResponse ); -map_response_std!(get_event_response, GetEventResponse, SuccessfulResponse); -map_response_std!(get_file_response, GetFileResponse, SuccessfulResponse); -map_response_std!( +map_response!(get_event_response, GetEventResponse, SuccessfulResponse); +map_response!(get_file_response, GetFileResponse, SuccessfulResponse); +map_response!( get_local_scheduler_response, GetLocalSchedulerResponse, SuccessfulResponse ); -map_response_std!(get_result_response, GetResultResponse, SuccessfulResponse); -map_response_std!( +map_response!(get_result_response, GetResultResponse, SuccessfulResponse); +map_response!( get_user_data_response, GetUserDataResponse, SuccessfulResponse ); -map_response_std!( +map_response!( get_scheduled_compute_node_response, GetScheduledComputeNodeResponse, HTTP ); -map_response_std!( +map_response!( get_slurm_scheduler_response, 
GetSlurmSchedulerResponse, SuccessfulResponse ); -map_response_std!( +map_response!( get_access_group_response, GetAccessGroupResponse, SuccessfulResponse ); -map_response_std!( +map_response!( get_failure_handler_response, GetFailureHandlerResponse, SuccessfulResponse ); -map_response_std!( +map_response!( get_resource_requirements_response, GetResourceRequirementsResponse, SuccessfulResponse ); -map_response_std!( +map_response!( get_ro_crate_entity_response, GetRoCrateEntityResponse, SuccessfulResponse ); -map_response_conflict!( +map_response!( add_user_to_group_response, AddUserToGroupResponse, SuccessfulResponse, - ConflictErrorResponse + ConflictErrorResponse => StatusCode::CONFLICT ); -map_response_std!( +map_response!( remove_user_from_group_response, RemoveUserFromGroupResponse, SuccessfulResponse ); -map_response_conflict!( +map_response!( add_workflow_to_group_response, AddWorkflowToGroupResponse, SuccessfulResponse, - ConflictErrorResponse + ConflictErrorResponse => StatusCode::CONFLICT ); -map_response_std!( +map_response!( remove_workflow_from_group_response, RemoveWorkflowFromGroupResponse, SuccessfulResponse ); -map_response_std!( +map_response!( check_workflow_access_response, CheckWorkflowAccessResponse, SuccessfulResponse ); -map_response_std!(reload_auth_response, ReloadAuthResponse, SuccessfulResponse); -map_response_unprocessable!( +map_response!(reload_auth_response, ReloadAuthResponse, SuccessfulResponse); +map_response!( update_ro_crate_entity_response, UpdateRoCrateEntityResponse, - SuccessfulResponse -); -map_response_unprocessable!(create_job_response, CreateJobResponse, SuccessfulResponse); -map_response_std!(list_jobs_response, ListJobsResponse, SuccessfulResponse); -map_response_std!(delete_jobs_response, DeleteJobsResponse, SuccessfulResponse); -map_response_std!(get_job_response, GetJobResponse, SuccessfulResponse); -map_response_unprocessable!(update_job_response, UpdateJobResponse, SuccessfulResponse); 
-map_response_std!(delete_job_response, DeleteJobResponse, SuccessfulResponse); -map_response_unprocessable!( + SuccessfulResponse, + UnprocessableContentErrorResponse => StatusCode::UNPROCESSABLE_ENTITY +); +map_response!(create_job_response, CreateJobResponse, SuccessfulResponse, UnprocessableContentErrorResponse => StatusCode::UNPROCESSABLE_ENTITY); +map_response!(list_jobs_response, ListJobsResponse, SuccessfulResponse); +map_response!(delete_jobs_response, DeleteJobsResponse, SuccessfulResponse); +map_response!(get_job_response, GetJobResponse, SuccessfulResponse); +map_response!(update_job_response, UpdateJobResponse, SuccessfulResponse, UnprocessableContentErrorResponse => StatusCode::UNPROCESSABLE_ENTITY); +map_response!(delete_job_response, DeleteJobResponse, SuccessfulResponse); +map_response!( complete_job_response, CompleteJobResponse, - SuccessfulResponse + SuccessfulResponse, + UnprocessableContentErrorResponse => StatusCode::UNPROCESSABLE_ENTITY ); -map_response_std!( +map_response!( batch_complete_jobs_response, BatchCompleteJobsResponse, SuccessfulResponse ); -map_response_unprocessable!( +map_response!( manage_status_change_response, ManageStatusChangeResponse, - SuccessfulResponse + SuccessfulResponse, + UnprocessableContentErrorResponse => StatusCode::UNPROCESSABLE_ENTITY ); -map_response_unprocessable!(start_job_response, StartJobResponse, SuccessfulResponse); -map_response_unprocessable!(retry_job_response, RetryJobResponse, SuccessfulResponse); -map_response_unprocessable!( +map_response!(start_job_response, StartJobResponse, SuccessfulResponse, UnprocessableContentErrorResponse => StatusCode::UNPROCESSABLE_ENTITY); +map_response!(retry_job_response, RetryJobResponse, SuccessfulResponse, UnprocessableContentErrorResponse => StatusCode::UNPROCESSABLE_ENTITY); +map_response!( create_workflow_response, CreateWorkflowResponse, - SuccessfulResponse + SuccessfulResponse, + UnprocessableContentErrorResponse => StatusCode::UNPROCESSABLE_ENTITY ); 
-map_response_std!( +map_response!( list_workflows_response, ListWorkflowsResponse, SuccessfulResponse ); -map_response_std!( +map_response!( get_workflow_response, GetWorkflowResponse, SuccessfulResponse ); -map_response_unprocessable!( +map_response!( update_workflow_response, UpdateWorkflowResponse, - SuccessfulResponse + SuccessfulResponse, + UnprocessableContentErrorResponse => StatusCode::UNPROCESSABLE_ENTITY ); -map_response_std!( +map_response!( delete_workflow_response, DeleteWorkflowResponse, SuccessfulResponse ); -map_response_unprocessable!( +map_response!( create_workflow_action_response, CreateWorkflowActionResponse, - SuccessfulResponse + SuccessfulResponse, + UnprocessableContentErrorResponse => StatusCode::UNPROCESSABLE_ENTITY ); -map_response_std!( +map_response!( get_workflow_actions_response, GetWorkflowActionsResponse, SuccessfulResponse ); -map_response_std!( +map_response!( get_pending_actions_response, GetPendingActionsResponse, SuccessfulResponse ); -map_response_conflict!( +map_response!( claim_action_response, ClaimActionResponse, SuccessfulResponse, - ConflictResponse + ConflictResponse => StatusCode::CONFLICT ); -map_response_std!( +map_response!( cancel_workflow_response, CancelWorkflowResponse, SuccessfulResponse ); -map_response_std!( +map_response!( archive_workflow_response, ArchiveWorkflowResponse, SuccessfulResponse ); -map_response_unprocessable!( +map_response!( claim_jobs_based_on_resources_response, ClaimJobsBasedOnResources, - SuccessfulResponse + SuccessfulResponse, + UnprocessableContentErrorResponse => StatusCode::UNPROCESSABLE_ENTITY ); -map_response_std!( +map_response!( claim_next_jobs_response, ClaimNextJobsResponse, SuccessfulResponse ); -map_response_not_found_only!(get_task_response, GetTaskResponse, SuccessfulResponse); +map_response_no_forbidden!(get_task_response, GetTaskResponse, SuccessfulResponse); -map_response_not_found_only!( +map_response_no_forbidden!( get_active_task_response, GetActiveTaskResponse, 
SuccessfulResponse ); -map_response_accepted_conflict!( +map_response!( initialize_jobs_response, InitializeJobsResponse, SuccessfulResponse, - AcceptedResponse, - ConflictErrorResponse + AcceptedResponse => StatusCode::ACCEPTED, + ConflictErrorResponse => StatusCode::CONFLICT ); -map_response_std!( +map_response!( is_workflow_complete_response, IsWorkflowCompleteResponse, SuccessfulResponse ); -map_response_std!( +map_response!( is_workflow_uninitialized_response, IsWorkflowUninitializedResponse, SuccessfulResponse ); -map_response_std!( +map_response!( list_job_dependencies_response, ListJobDependenciesResponse, SuccessfulResponse ); -map_response_std!( +map_response!( list_job_file_relationships_response, ListJobFileRelationshipsResponse, SuccessfulResponse ); -map_response_std!( +map_response!( list_job_ids_response, ListJobIdsResponse, SuccessfulResponse ); -map_response_std!( +map_response!( list_job_user_data_relationships_response, ListJobUserDataRelationshipsResponse, SuccessfulResponse ); -map_response_std!( +map_response!( list_missing_user_data_response, ListMissingUserDataResponse, SuccessfulResponse ); -map_response_std!( +map_response!( process_changed_job_inputs_response, ProcessChangedJobInputsResponse, SuccessfulResponse ); -map_response_std!( +map_response!( get_ready_job_requirements_response, GetReadyJobRequirementsResponse, SuccessfulResponse ); -map_response_std!( +map_response!( list_required_existing_files_response, ListRequiredExistingFilesResponse, SuccessfulResponse ); -map_response_std!( +map_response!( reset_job_status_response, ResetJobStatusResponse, SuccessfulResponse ); -map_response_unprocessable!( +map_response!( reset_workflow_status_response, ResetWorkflowStatusResponse, - SuccessfulResponse + SuccessfulResponse, + UnprocessableContentErrorResponse => StatusCode::UNPROCESSABLE_ENTITY ); pub(crate) fn json_response(body: &T) -> Response where diff --git a/tests/test_access_groups.rs b/tests/test_access_groups.rs index 
06a5b8a5..68c7207e 100644 --- a/tests/test_access_groups.rs +++ b/tests/test_access_groups.rs @@ -1371,7 +1371,7 @@ fn test_comprehensive_access_control_workflow_execution( assert_eq!( job.status.unwrap(), models::JobStatus::Completed, - "Job {} should be completed. actual status: {:?}", + "job name='{}' should be completed. actual status={:?}", job.name, job.status ); diff --git a/tests/test_auto_ro_crate.rs b/tests/test_auto_ro_crate.rs index c3718c63..02831ccc 100644 --- a/tests/test_auto_ro_crate.rs +++ b/tests/test_auto_ro_crate.rs @@ -554,7 +554,7 @@ fn test_auto_ro_crate_diamond_workflow(start_server: &ServerProcess) { assert_eq!( job.status, Some(models::JobStatus::Completed), - "Job {} should be completed", + "job name='{}' should be completed", job.name ); } diff --git a/tests/test_claim_next_jobs.rs b/tests/test_claim_next_jobs.rs index 94666f44..3bd41713 100644 --- a/tests/test_claim_next_jobs.rs +++ b/tests/test_claim_next_jobs.rs @@ -230,7 +230,7 @@ fn test_prepare_next_jobs_marks_jobs_pending(start_server: &ServerProcess) { assert_eq!( job.status.expect("Job status should be present"), models::JobStatus::Pending, - "Job {} should be marked as Pending", + "job name='{}' should be marked as Pending", job.name ); @@ -299,7 +299,7 @@ fn test_prepare_next_jobs_exhaust_all_jobs(start_server: &ServerProcess) { let job_id = job.id.expect("Job should have ID"); assert!( all_job_ids.insert(job_id), - "Job {} returned multiple times", + "job_id={} returned multiple times", job_id ); total_jobs_received += 1; diff --git a/tests/test_completion_reversal.rs b/tests/test_completion_reversal.rs index 128d52b2..896c3c57 100644 --- a/tests/test_completion_reversal.rs +++ b/tests/test_completion_reversal.rs @@ -308,7 +308,7 @@ fn test_completion_reversal_complex_dependencies(start_server: &ServerProcess) { assert_eq!( job.status.unwrap(), JobStatus::Uninitialized, - "Job {} should be Uninitialized after completion reversal", + "job_id={} should be Uninitialized after 
completion reversal", job_id ); } diff --git a/tests/test_correct_resources.rs b/tests/test_correct_resources.rs index 76fd6395..db92a929 100644 --- a/tests/test_correct_resources.rs +++ b/tests/test_correct_resources.rs @@ -948,7 +948,7 @@ fn claim_and_complete_jobs( // Verify this job was claimed assert!( claimed.iter().any(|j| j.id == Some(*job_id)), - "Job {} should have been claimed", + "job_id={} should have been claimed", job_id ); diff --git a/tests/test_full_workflows.rs b/tests/test_full_workflows.rs index e9c01571..741a2e23 100644 --- a/tests/test_full_workflows.rs +++ b/tests/test_full_workflows.rs @@ -99,7 +99,7 @@ fn verify_diamond_workflow_completion( assert_eq!( job.status.unwrap(), models::JobStatus::Completed, - "Job {} should be completed. actual status: {:?}", + "job name='{}' should be completed. actual status={:?}", job.name, job.status ); @@ -428,7 +428,7 @@ fn verify_many_jobs_completion( assert_eq!( job.status.unwrap(), models::JobStatus::Completed, - "Job {} should be completed. actual status: {:?}", + "job name='{}' should be completed. 
actual status={:?}", job.name, job.status ); @@ -790,7 +790,7 @@ resource_requirements: assert_eq!( job.status.unwrap(), models::JobStatus::Completed, - "Job {} should be completed after second run, got {:?}", + "job name='{}' should be completed after second run, got status={:?}", job.name, job.status ); @@ -1121,7 +1121,7 @@ resource_requirements: assert_eq!( job.status.unwrap(), models::JobStatus::Completed, - "Job {} should be completed after second run, got {:?}", + "job name='{}' should be completed after second run, got status={:?}", job.name, job.status ); diff --git a/tests/test_jobs.rs b/tests/test_jobs.rs index 9d7b30c7..9b9cff0e 100644 --- a/tests/test_jobs.rs +++ b/tests/test_jobs.rs @@ -949,7 +949,7 @@ fn test_jobs_delete_multiple(start_server: &ServerProcess) { // Verify all jobs are actually removed for job_id in [job1_id, job2_id, job3_id] { let get_result = apis::jobs_api::get_job(config, job_id); - assert!(get_result.is_err(), "Job {} should be deleted", job_id); + assert!(get_result.is_err(), "job_id={} should be deleted", job_id); } } diff --git a/tests/test_slurm_commands.rs b/tests/test_slurm_commands.rs index 8a046fce..1fc19be1 100644 --- a/tests/test_slurm_commands.rs +++ b/tests/test_slurm_commands.rs @@ -271,7 +271,7 @@ fn test_get_statuses_multiple_jobs() { for job_id in &job_ids { assert!( statuses_map.contains_key(job_id), - "Job {} not found in statuses", + "job_id={} not found in statuses", job_id ); assert_eq!(statuses_map[job_id], HpcJobStatus::Queued); @@ -1658,7 +1658,7 @@ fn test_slurm_run_jobs(start_server: &ServerProcess) { assert_eq!( job.status.unwrap(), models::JobStatus::Completed, - "Job {} should be Completed", + "job name='{}' should be Completed", job.name ); } diff --git a/tests/test_workflow_manager.rs b/tests/test_workflow_manager.rs index 0371cd97..dab6aa86 100644 --- a/tests/test_workflow_manager.rs +++ b/tests/test_workflow_manager.rs @@ -3128,7 +3128,7 @@ fn 
test_initialize_async_with_dependencies_completes_task_and_readies_jobs( assert_eq!( job.status, Some(models::JobStatus::Uninitialized), - "job {} should start Uninitialized, got {:?}", + "job_id={} should start Uninitialized, got status={:?}", id, job.status ); diff --git a/tests/test_workflow_spec.rs b/tests/test_workflow_spec.rs index b31ed5f8..5483ef48 100644 --- a/tests/test_workflow_spec.rs +++ b/tests/test_workflow_spec.rs @@ -3724,7 +3724,7 @@ fn test_subgraph_workflow_generated_actions_have_correct_triggers() { let sched = job .scheduler .as_ref() - .unwrap_or_else(|| panic!("Job {} should have scheduler assigned", job.name)); + .unwrap_or_else(|| panic!("job name='{}' should have scheduler assigned", job.name)); let trigger = scheduler_triggers .get(sched) .unwrap_or_else(|| panic!("Scheduler {} should have action", sched)); @@ -3737,7 +3737,7 @@ fn test_subgraph_workflow_generated_actions_have_correct_triggers() { assert_eq!( trigger, expected_trigger, - "Job {} (scheduler {}) should have trigger {}, got {}", + "job name='{}' scheduler='{}' should have trigger='{}', got trigger='{}'", job.name, sched, expected_trigger, trigger ); }