Skip to content

Commit 99ea8f5

Browse files
committed
refactor(api-server): Make CompressionUsage OpenAPI fields user-oriented.
- Change `status: i32` to `job_status: CompressionJobStatus` with typed enum
- Rename fields to include units: `_millisecs`, `_secs`, `_bytes`
- Rewrite docstrings to remove DB internals/Python file references
- Add `ToSchema` derive and user-facing docstrings to `CompressionJobStatus`
- Register `CompressionJobStatus` in OpenAPI schema components
1 parent 22196a6 commit 99ea8f5

File tree

4 files changed

+69
-50
lines changed

4 files changed

+69
-50
lines changed

components/api-server/src/client.rs

Lines changed: 21 additions & 19 deletions
Original file line numberDiff line numberDiff line change
@@ -133,36 +133,38 @@ impl TryFrom<CompressionUsageParams> for ValidatedCompressionUsageParams {
133133
}
134134
}
135135

136-
/// A single row returned by the compression usage query (one row per job).
136+
/// Resource usage statistics for the compression job with the specified ID.
137137
#[derive(Serialize, sqlx::FromRow, ToSchema)]
138138
pub struct CompressionUsage {
139139
/// Compression job ID.
140140
pub id: i32,
141-
/// Job status. See `CompressionJobStatus` in
142-
/// `components/job-orchestration/job_orchestration/scheduler/constants.py`:
143-
/// 0 = PENDING, 1 = RUNNING, 2 = SUCCEEDED, 3 = FAILED, 4 = KILLED.
144-
pub status: i32,
141+
/// Current status of the job.
142+
#[sqlx(rename = "status", try_from = "i32")]
143+
pub job_status: CompressionJobStatus,
145144
/// Time the job was created (epoch milliseconds).
146145
#[serde(with = "chrono::serde::ts_milliseconds")]
147146
#[schema(value_type = i64)]
148-
pub creation_time: DateTime<Utc>,
149-
/// Time the job started (epoch milliseconds). Always non-null in results
150-
/// because the WHERE clause filters on `start_time`.
147+
#[sqlx(rename = "creation_time")]
148+
pub time_creation_millisecs: DateTime<Utc>,
149+
/// Time the job started executing (epoch milliseconds).
151150
#[serde(with = "chrono::serde::ts_milliseconds")]
152151
#[schema(value_type = i64)]
153-
pub start_time: DateTime<Utc>,
154-
/// Wall-clock seconds the job ran for. `None` for non-succeeded jobs
155-
/// (FAILED, KILLED, RUNNING) since `duration` is only set on completion.
156-
pub duration: Option<f64>,
157-
/// Total uncompressed size of input files (bytes).
158-
pub uncompressed_size: i64,
159-
/// Total compressed archive size (bytes).
160-
pub compressed_size: i64,
152+
#[sqlx(rename = "start_time")]
153+
pub time_begin_millisecs: DateTime<Utc>,
154+
/// Wall-clock duration the job ran, in seconds. Absent if the job did not complete.
155+
#[sqlx(rename = "duration")]
156+
pub duration_secs: Option<f64>,
157+
/// Total uncompressed size of input files, in bytes.
158+
#[sqlx(rename = "uncompressed_size")]
159+
pub uncompressed_size_bytes: i64,
160+
/// Total compressed archive size, in bytes.
161+
#[sqlx(rename = "compressed_size")]
162+
pub compressed_size_bytes: i64,
161163
/// Number of tasks the job was split into.
162164
pub num_tasks: i32,
163-
/// Sum of all task durations (CPU-seconds across all parallel workers).
164-
/// `None` if all task duration values are NULL in the database.
165-
pub tasks_duration: Option<f64>,
165+
/// Sum of all task durations, in seconds. Absent if no tasks reported a duration.
166+
#[sqlx(rename = "tasks_duration")]
167+
pub tasks_duration_secs: Option<f64>,
166168
}
167169

168170
/// Defines the request configuration for submitting a search query.

components/api-server/src/routes.rs

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -73,6 +73,7 @@ mod api_doc {
7373
__path_query_results,
7474
CompressionUsage,
7575
};
76+
use crate::client::CompressionJobStatus;
7677

7778
#[derive(utoipa::OpenApi)]
7879
#[openapi(
@@ -82,7 +83,7 @@ mod api_doc {
8283
contact(name = "YScope")
8384
),
8485
paths(health, query, query_results, cancel_query, compression_usage),
85-
components(schemas(CompressionUsage))
86+
components(schemas(CompressionUsage, CompressionJobStatus))
8687
)]
8788
pub struct ApiDoc;
8889
}

components/clp-rust-utils/src/job_config/compression.rs

Lines changed: 8 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,10 +1,11 @@
11
use num_enum::{IntoPrimitive, TryFromPrimitive};
22
use serde::{Deserialize, Serialize};
33
use strum::EnumString;
4+
use utoipa::ToSchema;
45

56
pub type CompressionJobId = i32;
67

7-
/// Mirror of `job_orchestration.scheduler.constants.CompressionJobStatus`. Must be kept in sync.
8+
// Mirror of `job_orchestration.scheduler.constants.CompressionJobStatus`. Must be kept in sync.
89
#[derive(
910
Clone,
1011
Copy,
@@ -15,14 +16,20 @@ pub type CompressionJobId = i32;
1516
IntoPrimitive,
1617
PartialEq,
1718
Serialize,
19+
ToSchema,
1820
TryFromPrimitive,
1921
)]
2022
#[repr(i32)]
2123
#[strum(ascii_case_insensitive)]
2224
pub enum CompressionJobStatus {
25+
/// Job is waiting to be scheduled.
2326
Pending = 0,
27+
/// Job is currently executing.
2428
Running = 1,
29+
/// Job completed successfully.
2530
Succeeded = 2,
31+
/// Job failed.
2632
Failed = 3,
33+
/// Job was killed by a user.
2734
Killed = 4,
2835
}

docs/src/_static/generated/api-server-openapi.json

Lines changed: 38 additions & 29 deletions
Original file line numberDiff line numberDiff line change
@@ -213,69 +213,78 @@
213213
},
214214
"components": {
215215
"schemas": {
216+
"CompressionJobStatus": {
217+
"type": "string",
218+
"enum": [
219+
"Pending",
220+
"Running",
221+
"Succeeded",
222+
"Failed",
223+
"Killed"
224+
]
225+
},
216226
"CompressionUsage": {
217227
"type": "object",
218-
"description": "A single row returned by the compression usage query (one row per job).",
228+
"description": "Resource usage statistics for the compression job with the specified ID.",
219229
"required": [
220230
"id",
221-
"status",
222-
"creation_time",
223-
"start_time",
224-
"uncompressed_size",
225-
"compressed_size",
231+
"job_status",
232+
"time_creation_millisecs",
233+
"time_begin_millisecs",
234+
"uncompressed_size_bytes",
235+
"compressed_size_bytes",
226236
"num_tasks"
227237
],
228238
"properties": {
229-
"compressed_size": {
239+
"compressed_size_bytes": {
230240
"type": "integer",
231241
"format": "int64",
232-
"description": "Total compressed archive size (bytes)."
233-
},
234-
"creation_time": {
235-
"type": "integer",
236-
"format": "int64",
237-
"description": "Time the job was created (epoch milliseconds)."
242+
"description": "Total compressed archive size, in bytes."
238243
},
239-
"duration": {
244+
"duration_secs": {
240245
"type": [
241246
"number",
242247
"null"
243248
],
244249
"format": "double",
245-
"description": "Wall-clock seconds the job ran for. `None` for non-succeeded jobs\n(FAILED, KILLED, RUNNING) since `duration` is only set on completion."
250+
"description": "Wall-clock duration the job ran, in seconds. Absent if the job did not complete."
246251
},
247252
"id": {
248253
"type": "integer",
249254
"format": "int32",
250255
"description": "Compression job ID."
251256
},
257+
"job_status": {
258+
"$ref": "#/components/schemas/CompressionJobStatus",
259+
"description": "Current status of the job."
260+
},
252261
"num_tasks": {
253262
"type": "integer",
254263
"format": "int32",
255264
"description": "Number of tasks the job was split into."
256265
},
257-
"start_time": {
258-
"type": "integer",
259-
"format": "int64",
260-
"description": "Time the job started (epoch milliseconds). Always non-null in results\nbecause the WHERE clause filters on `start_time`."
261-
},
262-
"status": {
263-
"type": "integer",
264-
"format": "int32",
265-
"description": "Job status. See `CompressionJobStatus` in\n`components/job-orchestration/job_orchestration/scheduler/constants.py`:\n0 = PENDING, 1 = RUNNING, 2 = SUCCEEDED, 3 = FAILED, 4 = KILLED."
266-
},
267-
"tasks_duration": {
266+
"tasks_duration_secs": {
268267
"type": [
269268
"number",
270269
"null"
271270
],
272271
"format": "double",
273-
"description": "Sum of all task durations (CPU-seconds across all parallel workers).\n`None` if all task duration values are NULL in the database."
272+
"description": "Sum of all task durations, in seconds. Absent if no tasks reported a duration."
273+
},
274+
"time_begin_millisecs": {
275+
"type": "integer",
276+
"format": "int64",
277+
"description": "Time the job started executing (epoch milliseconds)."
278+
},
279+
"time_creation_millisecs": {
280+
"type": "integer",
281+
"format": "int64",
282+
"description": "Time the job was created (epoch milliseconds)."
274283
},
275-
"uncompressed_size": {
284+
"uncompressed_size_bytes": {
276285
"type": "integer",
277286
"format": "int64",
278-
"description": "Total uncompressed size of input files (bytes)."
287+
"description": "Total uncompressed size of input files, in bytes."
279288
}
280289
}
281290
},

0 commit comments

Comments (0)