import simvue.api.objects as sv_api_obj
from simvue.api.objects.alert.base import AlertBase

+
@pytest.mark.dependency
@pytest.mark.client
def test_get_events(create_test_run: tuple[sv_run.Run, dict]) -> None:
@@ -24,12 +25,8 @@ def test_get_events(create_test_run: tuple[sv_run.Run, dict]) -> None:

@pytest.mark.dependency
@pytest.mark.client
-@pytest.mark.parametrize(
-    "from_run", (True, False), ids=("from_run", "all_runs")
-)
-@pytest.mark.parametrize(
-    "names_only", (True, False), ids=("names_only", "all_details")
-)
+@pytest.mark.parametrize("from_run", (True, False), ids=("from_run", "all_runs"))
+@pytest.mark.parametrize("names_only", (True, False), ids=("names_only", "all_details"))
@pytest.mark.parametrize(
    "critical_only", (True, False), ids=("critical_only", "all_states")
)
@@ -43,42 +40,44 @@ def test_get_alerts(
    run_id = run.id
    unique_id = f"{uuid.uuid4()}".split("-")[0]
    _id_1 = run.create_user_alert(
-        name=f"user_alert_1_{unique_id}",
+        name=f"user_alert_1_{unique_id}",
    )
    run.create_user_alert(
-        name=f"user_alert_2_{unique_id}",
-    )
-    run.create_user_alert(
-        name=f"user_alert_3_{unique_id}",
-        attach_to_run=False
+        name=f"user_alert_2_{unique_id}",
    )
+    run.create_user_alert(name=f"user_alert_3_{unique_id}", attach_to_run=False)
    run.log_alert(identifier=_id_1, state="critical")
    time.sleep(2)
    run.close()
-
+
    client = svc.Client()

    if critical_only and not from_run:
        with pytest.raises(RuntimeError) as e:
-            _alerts = client.get_alerts(critical_only=critical_only, names_only=names_only)
-        assert "critical_only is ambiguous when returning alerts with no run ID specified." in str(e.value)
+            _alerts = client.get_alerts(
+                critical_only=critical_only, names_only=names_only
+            )
+        assert (
+            "critical_only is ambiguous when returning alerts with no run ID specified."
+            in str(e.value)
+        )
    else:
        sorting = None if run_id else [("name", True), ("created", True)]
        _alerts = client.get_alerts(
            run_id=run_id if from_run else None,
            critical_only=critical_only,
            names_only=names_only,
-            sort_by_columns=sorting
+            sort_by_columns=sorting,
        )
-
+
        if names_only:
            assert all(isinstance(item, str) for item in _alerts)
        else:
-            assert all(isinstance(item, AlertBase) for item in _alerts)
+            assert all(isinstance(item, AlertBase) for item in _alerts)
            _alerts = [alert.name for alert in _alerts]
-
+
        assert f"user_alert_1_{unique_id}" in _alerts
-
+
        if not from_run:
            assert len(_alerts) > 2
            assert f"user_alert_3_{unique_id}" in _alerts
@@ -90,6 +89,7 @@ def test_get_alerts(
            assert len(_alerts) == 2
            assert f"user_alert_2_{unique_id}" in _alerts

+
@pytest.mark.dependency
@pytest.mark.client
def test_get_run_id_from_name(create_test_run: tuple[sv_run.Run, dict]) -> None:
@@ -104,12 +104,8 @@ def test_get_run_id_from_name(create_test_run: tuple[sv_run.Run, dict]) -> None:
@pytest.mark.client
@pytest.mark.parametrize(
    "aggregate,use_name_labels",
-    [
-        (True, False),
-        (False, False),
-        (False, True)
-    ],
-    ids=("aggregate", "complete_ids", "complete_labels")
+    [(True, False), (False, False), (False, True)],
+    ids=("aggregate", "complete_ids", "complete_labels"),
)
def test_get_metric_values(
    create_test_run: tuple[sv_run.Run, dict], aggregate: bool, use_name_labels: bool
@@ -130,9 +126,9 @@ def test_get_metric_values(
    assert create_test_run[1]["metrics"][0] in _metrics_dict.keys()
    if aggregate:
        _value_types = {i[1] for i in _first_entry}
-        assert all(
-            i in _value_types for i in ("average", "min", "max")
-        ), f"Expected ('average', 'min', 'max') in {_value_types}"
+        assert all(i in _value_types for i in ("average", "min", "max")), (
+            f"Expected ('average', 'min', 'max') in {_value_types}"
+        )
    elif not use_name_labels:
        _runs = {i[1] for i in _first_entry}
        assert create_test_run[1]["run_id"] in _runs
@@ -157,12 +153,17 @@ def test_plot_metrics(create_test_run: tuple[sv_run.Run, dict]) -> None:
@pytest.mark.dependency
@pytest.mark.client
@pytest.mark.parametrize(
-    "sorting", ([("metadata.test_identifier", True)], [("name", True), ("created", True)], None),
-    ids=("sorted-metadata", "sorted-name-created", None)
+    "sorting",
+    ([("metadata.test_identifier", True)], [("name", True), ("created", True)], None),
+    ids=("sorted-metadata", "sorted-name-created", None),
)
-def test_get_artifacts_entries(create_test_run: tuple[sv_run.Run, dict], sorting: list[tuple[str, bool]] | None) -> None:
+def test_get_artifacts_entries(
+    create_test_run: tuple[sv_run.Run, dict], sorting: list[tuple[str, bool]] | None
+) -> None:
    client = svc.Client()
-    assert dict(client.list_artifacts(create_test_run[1]["run_id"], sort_by_columns=sorting))
+    assert dict(
+        client.list_artifacts(create_test_run[1]["run_id"], sort_by_columns=sorting)
+    )
    assert client.get_artifact(create_test_run[1]["run_id"], name="test_attributes")

@@ -180,7 +181,9 @@ def test_get_artifact_as_file(
            name=_file_name,
            output_dir=tempd,
        )
-        assert pathlib.Path(tempd).joinpath(_file_name).exists(), f"Failed to download '{_file_name}'"
+        assert pathlib.Path(tempd).joinpath(_file_name).exists(), (
+            f"Failed to download '{_file_name}'"
+        )


@pytest.mark.dependency
@@ -196,7 +199,7 @@ def test_get_artifacts_as_files(
            create_test_run[1]["run_id"], category=category, output_dir=tempd
        )
        files = [os.path.basename(i) for i in glob.glob(os.path.join(tempd, "*"))]
-
+
        if not category:
            expected_files = ["file_1", "file_2", "file_3"]
        elif category == "input":
@@ -205,7 +208,7 @@ def test_get_artifacts_as_files(
            expected_files = ["file_2"]
        elif category == "code":
            expected_files = ["file_3"]
-
+
        for file in ["file_1", "file_2", "file_3"]:
            if file in expected_files:
                assert create_test_run[1][file] in files
@@ -222,12 +225,18 @@ def test_get_artifacts_as_files(
        ("dataframe", [("created", True), ("started", True)]),
        ("objects", [("metadata.test_identifier", True)]),
    ],
-    ids=("dict-unsorted", "dataframe-datesorted", "objects-metasorted")
+    ids=("dict-unsorted", "dataframe-datesorted", "objects-metasorted"),
)
-def test_get_runs(create_test_run: tuple[sv_run.Run, dict], output_format: str, sorting: list[tuple[str, bool]] | None) -> None:
+def test_get_runs(
+    create_test_run: tuple[sv_run.Run, dict],
+    output_format: str,
+    sorting: list[tuple[str, bool]] | None,
+) -> None:
    client = svc.Client()

-    _result = client.get_runs(filters=[], output_format=output_format, count_limit=10, sort_by_columns=sorting)
+    _result = client.get_runs(
+        filters=[], output_format=output_format, count_limit=10, sort_by_columns=sorting
+    )

    if output_format == "dataframe":
        assert not _result.empty
@@ -245,10 +254,13 @@ def test_get_run(create_test_run: tuple[sv_run.Run, dict]) -> None:
@pytest.mark.dependency
@pytest.mark.client
@pytest.mark.parametrize(
-    "sorting", (None, [("metadata.test_identifier", True), ("path", True)], [("modified", False)]),
-    ids=("no-sort", "sort-path-metadata", "sort-modified")
+    "sorting",
+    (None, [("metadata.test_identifier", True), ("path", True)], [("modified", False)]),
+    ids=("no-sort", "sort-path-metadata", "sort-modified"),
)
-def test_get_folders(create_test_run: tuple[sv_run.Run, dict], sorting: list[tuple[str, bool]] | None) -> None:
+def test_get_folders(
+    create_test_run: tuple[sv_run.Run, dict], sorting: list[tuple[str, bool]] | None
+) -> None:
    client = svc.Client()
    assert (folders := client.get_folders(sort_by_columns=sorting))
    _id, _folder = next(folders)
@@ -277,7 +289,12 @@ def test_get_tag(create_plain_run: tuple[sv_run.Run, dict]) -> None:
@pytest.mark.client
def test_run_deletion() -> None:
    run = sv_run.Run()
-    run.init(name="test_run_deletion", folder="/simvue_unit_testing", tags=["test_run_deletion"], retention_period="1 min")
+    run.init(
+        name="test_run_deletion",
+        folder="/simvue_unit_testing",
+        tags=["test_run_deletion"],
+        retention_period="1 min",
+    )
    run.log_metrics({"x": 2})
    run.close()
    client = svc.Client()
@@ -291,13 +308,18 @@ def test_run_deletion() -> None:
def test_runs_deletion() -> None:
    _runs = [sv_run.Run() for _ in range(5)]
    for i, run in enumerate(_runs):
-        run.init(name="test_runs_deletion", folder="/simvue_unit_testing/runs_batch", tags=["test_runs_deletion"], retention_period="1 min")
+        run.init(
+            name="test_runs_deletion",
+            folder="/simvue_unit_testing/runs_batch",
+            tags=["test_runs_deletion"],
+            retention_period="1 min",
+        )
        run.log_metrics({"x": i})
    client = svc.Client()
    assert len(client.delete_runs("/simvue_unit_testing/runs_batch")) > 0
    for run in _runs:
        with pytest.raises(ObjectNotFoundError):
-            client.get_run(run.id)
+            client.get_run(run.id)


@pytest.mark.dependency
@@ -316,11 +338,24 @@ def test_get_tags(create_plain_run: tuple[sv_run.Run, dict]) -> None:
@pytest.mark.client
def test_folder_deletion() -> None:
    run = sv_run.Run()
-    run.init(name="test_folder_deletion", folder="/simvue_unit_testing/delete_me", tags=["test_folder_deletion"], retention_period="1 min")
+    _temp_folder_id: str = f"{uuid.uuid4()}".split("-")[0]
+    run.init(
+        name="test_folder_deletion",
+        folder=f"/simvue_unit_testing/{_temp_folder_id}",
+        tags=["test_folder_deletion"],
+        retention_period="1 min",
+    )
    run.close()
    client = svc.Client()
    # This test is called last, one run created so expect length 1
-    assert len(client.delete_folder("/simvue_unit_testing/delete_me", remove_runs=True)) == 1
+    assert (
+        len(
+            client.delete_folder(
+                f"/simvue_unit_testing/{_temp_folder_id}", remove_runs=True
+            )
+        )
+        == 1
+    )
    time.sleep(10)
    with pytest.raises(ObjectNotFoundError):
        client.get_folder("/simvue_unit_testing/delete_me")
@@ -332,19 +367,24 @@ def test_folder_deletion() -> None:
def test_run_folder_metadata_find(create_plain_run: tuple[sv_run.Run, dict]) -> None:
    run, run_data = create_plain_run
    rand_val = random.randint(0, 1000)
-    run.set_folder_details(metadata={'atest': rand_val})
+    run.set_folder_details(metadata={"atest": rand_val})
    run.close()
    time.sleep(1.0)
    client = svc.Client()
-    data = client.get_folders(filters=[f'metadata.atest == {rand_val}'])
+    data = client.get_folders(filters=[f"metadata.atest == {rand_val}"])

    assert run_data["folder"] in [i.path for _, i in data]


@pytest.mark.client
def test_tag_deletion() -> None:
    run = sv_run.Run()
-    run.init(name="test_folder_deletion", folder="/simvue_unit_testing/delete_me", tags=["test_tag_deletion"], retention_period="1 min")
+    run.init(
+        name="test_folder_deletion",
+        folder="/simvue_unit_testing",
+        tags=["test_tag_deletion"],
+        retention_period="1 min",
+    )
    run.close()
    unique_id = f"{uuid.uuid4()}".split("-")[0]
    run.update_tags([(tag_str := f"delete_me_{unique_id}")])
@@ -398,7 +438,9 @@ def test_multiple_metric_retrieval(

@pytest.mark.client
def test_alert_deletion() -> None:
-    _alert = sv_api_obj.UserAlert.new(name="test_alert", notification="none", description=None)
+    _alert = sv_api_obj.UserAlert.new(
+        name="test_alert", notification="none", description=None
+    )
    _alert.commit()
    _client = svc.Client()
    _client.delete_alert(alert_id=_alert.id)
@@ -422,5 +464,3 @@ def test_abort_run(speedy_heartbeat, create_plain_run: tuple[sv_run.Run, dict])
    except AssertionError:
        time.sleep(2)
        assert run._status == "terminated"
-
-