import simvue.api.objects as sv_api_obj
from simvue.api.objects.alert.base import AlertBase

-
@pytest.mark.dependency
@pytest.mark.client
def test_get_events(create_test_run: tuple[sv_run.Run, dict]) -> None:
@@ -25,8 +24,12 @@ def test_get_events(create_test_run: tuple[sv_run.Run, dict]) -> None:

@pytest.mark.dependency
@pytest.mark.client
-@pytest.mark.parametrize("from_run", (True, False), ids=("from_run", "all_runs"))
-@pytest.mark.parametrize("names_only", (True, False), ids=("names_only", "all_details"))
+@pytest.mark.parametrize(
+    "from_run", (True, False), ids=("from_run", "all_runs")
+)
+@pytest.mark.parametrize(
+    "names_only", (True, False), ids=("names_only", "all_details")
+)
@pytest.mark.parametrize(
    "critical_only", (True, False), ids=("critical_only", "all_states")
)
@@ -40,44 +43,42 @@ def test_get_alerts(
    run_id = run.id
    unique_id = f"{uuid.uuid4()}".split("-")[0]
    _id_1 = run.create_user_alert(
-        name=f"user_alert_1_{unique_id}",
+        name=f"user_alert_1_{unique_id}",
    )
    run.create_user_alert(
-        name=f"user_alert_2_{unique_id}",
+        name=f"user_alert_2_{unique_id}",
+    )
+    run.create_user_alert(
+        name=f"user_alert_3_{unique_id}",
+        attach_to_run=False
    )
-    run.create_user_alert(name=f"user_alert_3_{unique_id}", attach_to_run=False)
    run.log_alert(identifier=_id_1, state="critical")
    time.sleep(2)
    run.close()
-
+
    client = svc.Client()

    if critical_only and not from_run:
        with pytest.raises(RuntimeError) as e:
-            _alerts = client.get_alerts(
-                critical_only=critical_only, names_only=names_only
-            )
-        assert (
-            "critical_only is ambiguous when returning alerts with no run ID specified."
-            in str(e.value)
-        )
+            _alerts = client.get_alerts(critical_only=critical_only, names_only=names_only)
+        assert "critical_only is ambiguous when returning alerts with no run ID specified." in str(e.value)
    else:
        sorting = None if run_id else [("name", True), ("created", True)]
        _alerts = client.get_alerts(
            run_id=run_id if from_run else None,
            critical_only=critical_only,
            names_only=names_only,
-            sort_by_columns=sorting,
+            sort_by_columns=sorting
        )
-
+
        if names_only:
            assert all(isinstance(item, str) for item in _alerts)
        else:
-            assert all(isinstance(item, AlertBase) for item in _alerts)
+            assert all(isinstance(item, AlertBase) for item in _alerts)
            _alerts = [alert.name for alert in _alerts]
-
+
        assert f"user_alert_1_{unique_id}" in _alerts
-
+
        if not from_run:
            assert len(_alerts) > 2
            assert f"user_alert_3_{unique_id}" in _alerts
@@ -89,7 +90,6 @@ def test_get_alerts(
            assert len(_alerts) == 2
            assert f"user_alert_2_{unique_id}" in _alerts

-
@pytest.mark.dependency
@pytest.mark.client
def test_get_run_id_from_name(create_test_run: tuple[sv_run.Run, dict]) -> None:
@@ -104,8 +104,12 @@ def test_get_run_id_from_name(create_test_run: tuple[sv_run.Run, dict]) -> None:
@pytest.mark.client
@pytest.mark.parametrize(
    "aggregate,use_name_labels",
-    [(True, False), (False, False), (False, True)],
-    ids=("aggregate", "complete_ids", "complete_labels"),
+    [
+        (True, False),
+        (False, False),
+        (False, True)
+    ],
+    ids=("aggregate", "complete_ids", "complete_labels")
)
def test_get_metric_values(
    create_test_run: tuple[sv_run.Run, dict], aggregate: bool, use_name_labels: bool
@@ -126,9 +130,9 @@ def test_get_metric_values(
    assert create_test_run[1]["metrics"][0] in _metrics_dict.keys()
    if aggregate:
        _value_types = {i[1] for i in _first_entry}
-        assert all(i in _value_types for i in ("average", "min", "max")), (
-            f"Expected ('average', 'min', 'max') in {_value_types}"
-        )
+        assert all(
+            i in _value_types for i in ("average", "min", "max")
+        ), f"Expected ('average', 'min', 'max') in {_value_types}"
    elif not use_name_labels:
        _runs = {i[1] for i in _first_entry}
        assert create_test_run[1]["run_id"] in _runs
@@ -153,17 +157,12 @@ def test_plot_metrics(create_test_run: tuple[sv_run.Run, dict]) -> None:
@pytest.mark.dependency
@pytest.mark.client
@pytest.mark.parametrize(
-    "sorting",
-    ([("metadata.test_identifier", True)], [("name", True), ("created", True)], None),
-    ids=("sorted-metadata", "sorted-name-created", None),
+    "sorting", ([("metadata.test_identifier", True)], [("name", True), ("created", True)], None),
+    ids=("sorted-metadata", "sorted-name-created", None)
)
-def test_get_artifacts_entries(
-    create_test_run: tuple[sv_run.Run, dict], sorting: list[tuple[str, bool]] | None
-) -> None:
+def test_get_artifacts_entries(create_test_run: tuple[sv_run.Run, dict], sorting: list[tuple[str, bool]] | None) -> None:
    client = svc.Client()
-    assert dict(
-        client.list_artifacts(create_test_run[1]["run_id"], sort_by_columns=sorting)
-    )
+    assert dict(client.list_artifacts(create_test_run[1]["run_id"], sort_by_columns=sorting))
    assert client.get_artifact(create_test_run[1]["run_id"], name="test_attributes")


@@ -181,9 +180,7 @@ def test_get_artifact_as_file(
            name=_file_name,
            output_dir=tempd,
        )
-        assert pathlib.Path(tempd).joinpath(_file_name).exists(), (
-            f"Failed to download '{_file_name}'"
-        )
+        assert pathlib.Path(tempd).joinpath(_file_name).exists(), f"Failed to download '{_file_name}'"


@pytest.mark.dependency
@@ -199,7 +196,7 @@ def test_get_artifacts_as_files(
            create_test_run[1]["run_id"], category=category, output_dir=tempd
        )
        files = [os.path.basename(i) for i in glob.glob(os.path.join(tempd, "*"))]
-
+
        if not category:
            expected_files = ["file_1", "file_2", "file_3"]
        elif category == "input":
@@ -208,7 +205,7 @@ def test_get_artifacts_as_files(
            expected_files = ["file_2"]
        elif category == "code":
            expected_files = ["file_3"]
-
+
        for file in ["file_1", "file_2", "file_3"]:
            if file in expected_files:
                assert create_test_run[1][file] in files
@@ -225,18 +222,12 @@ def test_get_artifacts_as_files(
        ("dataframe", [("created", True), ("started", True)]),
        ("objects", [("metadata.test_identifier", True)]),
    ],
-    ids=("dict-unsorted", "dataframe-datesorted", "objects-metasorted"),
+    ids=("dict-unsorted", "dataframe-datesorted", "objects-metasorted")
)
-def test_get_runs(
-    create_test_run: tuple[sv_run.Run, dict],
-    output_format: str,
-    sorting: list[tuple[str, bool]] | None,
-) -> None:
+def test_get_runs(create_test_run: tuple[sv_run.Run, dict], output_format: str, sorting: list[tuple[str, bool]] | None) -> None:
    client = svc.Client()

-    _result = client.get_runs(
-        filters=[], output_format=output_format, count_limit=10, sort_by_columns=sorting
-    )
+    _result = client.get_runs(filters=[], output_format=output_format, count_limit=10, sort_by_columns=sorting)

    if output_format == "dataframe":
        assert not _result.empty
@@ -254,13 +245,10 @@ def test_get_run(create_test_run: tuple[sv_run.Run, dict]) -> None:
@pytest.mark.dependency
@pytest.mark.client
@pytest.mark.parametrize(
-    "sorting",
-    (None, [("metadata.test_identifier", True), ("path", True)], [("modified", False)]),
-    ids=("no-sort", "sort-path-metadata", "sort-modified"),
+    "sorting", (None, [("metadata.test_identifier", True), ("path", True)], [("modified", False)]),
+    ids=("no-sort", "sort-path-metadata", "sort-modified")
)
-def test_get_folders(
-    create_test_run: tuple[sv_run.Run, dict], sorting: list[tuple[str, bool]] | None
-) -> None:
+def test_get_folders(create_test_run: tuple[sv_run.Run, dict], sorting: list[tuple[str, bool]] | None) -> None:
    client = svc.Client()
    assert (folders := client.get_folders(sort_by_columns=sorting))
    _id, _folder = next(folders)
@@ -289,12 +277,7 @@ def test_get_tag(create_plain_run: tuple[sv_run.Run, dict]) -> None:
@pytest.mark.client
def test_run_deletion() -> None:
    run = sv_run.Run()
-    run.init(
-        name="test_run_deletion",
-        folder="/simvue_unit_testing",
-        tags=["test_run_deletion"],
-        retention_period="1 min",
-    )
+    run.init(name="test_run_deletion", folder="/simvue_unit_testing", tags=["test_run_deletion"], retention_period="1 min")
    run.log_metrics({"x": 2})
    run.close()
    client = svc.Client()
@@ -308,18 +291,13 @@ def test_run_deletion() -> None:
def test_runs_deletion() -> None:
    _runs = [sv_run.Run() for _ in range(5)]
    for i, run in enumerate(_runs):
-        run.init(
-            name="test_runs_deletion",
-            folder="/simvue_unit_testing/runs_batch",
-            tags=["test_runs_deletion"],
-            retention_period="1 min",
-        )
+        run.init(name="test_runs_deletion", folder="/simvue_unit_testing/runs_batch", tags=["test_runs_deletion"], retention_period="1 min")
        run.log_metrics({"x": i})
    client = svc.Client()
    assert len(client.delete_runs("/simvue_unit_testing/runs_batch")) > 0
    for run in _runs:
        with pytest.raises(ObjectNotFoundError):
-            client.get_run(run.id)
+            client.get_run(run.id)


@pytest.mark.dependency
@@ -339,26 +317,14 @@ def test_get_tags(create_plain_run: tuple[sv_run.Run, dict]) -> None:
def test_folder_deletion() -> None:
    run = sv_run.Run()
    _temp_folder_id: str = f"{uuid.uuid4()}".split()[0]
-    run.init(
-        name="test_folder_deletion",
-        folder=f"/simvue_unit_testing/{_temp_folder_id}",
-        tags=["test_folder_deletion"],
-        retention_period="1 min",
-    )
+    run.init(name="test_folder_deletion", folder=f"/simvue_unit_testing/{_temp_folder_id}", tags=["test_folder_deletion"], retention_period="1 min")
    run.close()
    client = svc.Client()
    # This test is called last, one run created so expect length 1
-    assert (
-        len(
-            client.delete_folder(
-                f"/simvue_unit_testing/{_temp_folder_id}", remove_runs=True
-            )
-        )
-        == 1
-    )
-    time.sleep(10)
-    with pytest.raises(ObjectNotFoundError):
-        client.get_folder("/simvue_unit_testing/delete_me")
+    assert len(client.delete_folder(f"/simvue_unit_testing/{_temp_folder_id}", remove_runs=True)) == 1
+
+    # If the folder has been deleted then an ObjectNotFoundError should be raised
+    assert not client.get_folder(f"/simvue_unit_testing/{_temp_folder_id}")
    with pytest.raises(ObjectNotFoundError):
        client.get_run(run_id=run.id)

@@ -367,24 +333,19 @@ def test_folder_deletion() -> None:
def test_run_folder_metadata_find(create_plain_run: tuple[sv_run.Run, dict]) -> None:
    run, run_data = create_plain_run
    rand_val = random.randint(0, 1000)
-    run.set_folder_details(metadata={"atest": rand_val})
+    run.set_folder_details(metadata={'atest': rand_val})
    run.close()
    time.sleep(1.0)
    client = svc.Client()
-    data = client.get_folders(filters=[f"metadata.atest == {rand_val}"])
+    data = client.get_folders(filters=[f'metadata.atest == {rand_val}'])

    assert run_data["folder"] in [i.path for _, i in data]


@pytest.mark.client
def test_tag_deletion() -> None:
    run = sv_run.Run()
-    run.init(
-        name="test_folder_deletion",
-        folder="/simvue_unit_testing",
-        tags=["test_tag_deletion"],
-        retention_period="1 min",
-    )
+    run.init(name="test_folder_deletion", folder="/simvue_unit_testing", tags=["test_tag_deletion"], retention_period="1 min")
    run.close()
    unique_id = f"{uuid.uuid4()}".split("-")[0]
    run.update_tags([(tag_str := f"delete_me_{unique_id}")])
@@ -438,9 +399,7 @@ def test_multiple_metric_retrieval(

@pytest.mark.client
def test_alert_deletion() -> None:
-    _alert = sv_api_obj.UserAlert.new(
-        name="test_alert", notification="none", description=None
-    )
+    _alert = sv_api_obj.UserAlert.new(name="test_alert", notification="none", description=None)
    _alert.commit()
    _client = svc.Client()
    _client.delete_alert(alert_id=_alert.id)
@@ -464,3 +423,5 @@ def test_abort_run(speedy_heartbeat, create_plain_run: tuple[sv_run.Run, dict])
    except AssertionError:
        time.sleep(2)
    assert run._status == "terminated"
+
+