 import simvue.api.objects as sv_api_obj
 from simvue.api.objects.alert.base import AlertBase
 
+
 @pytest.mark.client
 def test_get_events(create_test_run: tuple[sv_run.Run, dict]) -> None:
     client = svc.Client()
     assert client.get_events(run_id=create_test_run[1]["run_id"])
 
 
 @pytest.mark.client
-@pytest.mark.parametrize(
-    "from_run", (True, False), ids=("from_run", "all_runs")
-)
-@pytest.mark.parametrize(
-    "names_only", (True, False), ids=("names_only", "all_details")
-)
+@pytest.mark.parametrize("from_run", (True, False), ids=("from_run", "all_runs"))
+@pytest.mark.parametrize("names_only", (True, False), ids=("names_only", "all_details"))
 @pytest.mark.parametrize(
     "critical_only", (True, False), ids=("critical_only", "all_states")
 )
 def test_get_alerts(
-    create_plain_run: tuple[sv_run.Run, dict],
     from_run: bool,
     names_only: bool,
     critical_only: bool,
 ) -> None:
-    run, run_data = create_plain_run
-    run_id = run.id
     unique_id = f"{uuid.uuid4()}".split("-")[0]
-    _id_1 = run.create_user_alert(
-        name=f"user_alert_1_{unique_id}",
+    run = sv_run.Run()
+    run.init(
+        "test_get_alerts",
+        folder=f"/simvue_unit_testing/{unique_id}",
+        tags=["test_get_alerts"],
+        retention_period="2 mins",
     )
-    run.create_user_alert(
-        name=f"user_alert_2_{unique_id}",
+    run_id = run.id
+    _id_1 = run.create_user_alert(
+        name=f"user_alert_1_{unique_id}",
     )
     run.create_user_alert(
-        name=f"user_alert_3_{unique_id}",
-        attach_to_run=False
+        name=f"user_alert_2_{unique_id}",
     )
+    run.create_user_alert(name=f"user_alert_3_{unique_id}", attach_to_run=False)
     run.log_alert(identifier=_id_1, state="critical")
     time.sleep(2)
     run.close()
-
+
     client = svc.Client()
 
     if critical_only and not from_run:
         with pytest.raises(RuntimeError) as e:
-            _alerts = client.get_alerts(critical_only=critical_only, names_only=names_only)
-        assert "critical_only is ambiguous when returning alerts with no run ID specified." in str(e.value)
+            _alerts = client.get_alerts(
+                critical_only=critical_only, names_only=names_only
+            )
+        assert (
+            "critical_only is ambiguous when returning alerts with no run ID specified."
+            in str(e.value)
+        )
     else:
         sorting = None if run_id else [("name", True), ("created", True)]
         _alerts = client.get_alerts(
             run_id=run_id if from_run else None,
             critical_only=critical_only,
             names_only=names_only,
-            sort_by_columns=sorting
+            sort_by_columns=sorting,
         )
-
+
         if names_only:
             assert all(isinstance(item, str) for item in _alerts)
         else:
-            assert all(isinstance(item, AlertBase) for item in _alerts)
+            assert all(isinstance(item, AlertBase) for item in _alerts)
             _alerts = [alert.name for alert in _alerts]
-
+
         assert f"user_alert_1_{unique_id}" in _alerts
-
+
         if not from_run:
             assert len(_alerts) > 2
             assert f"user_alert_3_{unique_id}" in _alerts
@@ -90,6 +94,7 @@ def test_get_alerts(
             assert len(_alerts) == 2
             assert f"user_alert_2_{unique_id}" in _alerts
 
+
 @pytest.mark.client
 def test_get_run_id_from_name(create_test_run: tuple[sv_run.Run, dict]) -> None:
     client = svc.Client()
@@ -102,12 +107,8 @@ def test_get_run_id_from_name(create_test_run: tuple[sv_run.Run, dict]) -> None:
 @pytest.mark.client
 @pytest.mark.parametrize(
     "aggregate,use_name_labels",
-    [
-        (True, False),
-        (False, False),
-        (False, True)
-    ],
-    ids=("aggregate", "complete_ids", "complete_labels")
+    [(True, False), (False, False), (False, True)],
+    ids=("aggregate", "complete_ids", "complete_labels"),
 )
 def test_get_metric_values(
     create_test_run: tuple[sv_run.Run, dict], aggregate: bool, use_name_labels: bool
@@ -128,9 +129,9 @@ def test_get_metric_values(
     assert create_test_run[1]["metrics"][0] in _metrics_dict.keys()
     if aggregate:
         _value_types = {i[1] for i in _first_entry}
-        assert all(
-            i in _value_types for i in ("average", "min", "max")
-        ), f"Expected ('average', 'min', 'max') in {_value_types}"
+        assert all(i in _value_types for i in ("average", "min", "max")), (
+            f"Expected ('average', 'min', 'max') in {_value_types}"
+        )
     elif not use_name_labels:
         _runs = {i[1] for i in _first_entry}
         assert create_test_run[1]["run_id"] in _runs
@@ -153,12 +154,17 @@ def test_plot_metrics(create_test_run: tuple[sv_run.Run, dict]) -> None:
 
 @pytest.mark.client
 @pytest.mark.parametrize(
-    "sorting", ([("metadata.test_identifier", True)], [("name", True), ("created", True)], None),
-    ids=("sorted-metadata", "sorted-name-created", None)
+    "sorting",
+    ([("metadata.test_identifier", True)], [("name", True), ("created", True)], None),
+    ids=("sorted-metadata", "sorted-name-created", None),
 )
-def test_get_artifacts_entries(create_test_run: tuple[sv_run.Run, dict], sorting: list[tuple[str, bool]] | None) -> None:
+def test_get_artifacts_entries(
+    create_test_run: tuple[sv_run.Run, dict], sorting: list[tuple[str, bool]] | None
+) -> None:
     client = svc.Client()
-    assert dict(client.list_artifacts(create_test_run[1]["run_id"], sort_by_columns=sorting))
+    assert dict(
+        client.list_artifacts(create_test_run[1]["run_id"], sort_by_columns=sorting)
+    )
     assert client.get_artifact(create_test_run[1]["run_id"], name="test_attributes")
 
 
@@ -175,7 +181,9 @@ def test_get_artifact_as_file(
             name=_file_name,
             output_dir=tempd,
         )
-        assert pathlib.Path(tempd).joinpath(_file_name).exists(), f"Failed to download '{_file_name}'"
+        assert pathlib.Path(tempd).joinpath(_file_name).exists(), (
+            f"Failed to download '{_file_name}'"
+        )
 
 
 @pytest.mark.client
@@ -190,7 +198,7 @@ def test_get_artifacts_as_files(
             create_test_run[1]["run_id"], category=category, output_dir=tempd
         )
         files = [os.path.basename(i) for i in glob.glob(os.path.join(tempd, "*"))]
-
+
         if not category:
             expected_files = ["file_1", "file_2", "file_3"]
         elif category == "input":
@@ -199,7 +207,7 @@ def test_get_artifacts_as_files(
             expected_files = ["file_2"]
         elif category == "code":
             expected_files = ["file_3"]
-
+
         for file in ["file_1", "file_2", "file_3"]:
             if file in expected_files:
                 assert create_test_run[1][file] in files
@@ -215,12 +223,18 @@ def test_get_artifacts_as_files(
         ("dataframe", [("created", True), ("started", True)]),
         ("objects", [("metadata.test_identifier", True)]),
     ],
-    ids=("dict-unsorted", "dataframe-datesorted", "objects-metasorted")
+    ids=("dict-unsorted", "dataframe-datesorted", "objects-metasorted"),
 )
-def test_get_runs(create_test_run: tuple[sv_run.Run, dict], output_format: str, sorting: list[tuple[str, bool]] | None) -> None:
+def test_get_runs(
+    create_test_run: tuple[sv_run.Run, dict],
+    output_format: str,
+    sorting: list[tuple[str, bool]] | None,
+) -> None:
     client = svc.Client()
 
-    _result = client.get_runs(filters=[], output_format=output_format, count_limit=10, sort_by_columns=sorting)
+    _result = client.get_runs(
+        filters=[], output_format=output_format, count_limit=10, sort_by_columns=sorting
+    )
 
     if output_format == "dataframe":
         assert not _result.empty
@@ -236,10 +250,13 @@ def test_get_run(create_test_run: tuple[sv_run.Run, dict]) -> None:
 
 @pytest.mark.client
 @pytest.mark.parametrize(
-    "sorting", (None, [("metadata.test_identifier", True), ("path", True)], [("modified", False)]),
-    ids=("no-sort", "sort-path-metadata", "sort-modified")
+    "sorting",
+    (None, [("metadata.test_identifier", True), ("path", True)], [("modified", False)]),
+    ids=("no-sort", "sort-path-metadata", "sort-modified"),
 )
-def test_get_folders(create_test_run: tuple[sv_run.Run, dict], sorting: list[tuple[str, bool]] | None) -> None:
+def test_get_folders(
+    create_test_run: tuple[sv_run.Run, dict], sorting: list[tuple[str, bool]] | None
+) -> None:
     client = svc.Client()
     assert (folders := client.get_folders(sort_by_columns=sorting))
     _id, _folder = next(folders)
@@ -252,7 +269,10 @@ def test_get_metrics_names(create_test_run: tuple[sv_run.Run, dict]) -> None:
     client = svc.Client()
     attempts: int = 0
 
-    while not list(client.get_metrics_names(create_test_run[1]["run_id"])) and attempts < 10:
+    while (
+        not list(client.get_metrics_names(create_test_run[1]["run_id"]))
+        and attempts < 10
+    ):
         time.sleep(1)
         attempts += 1
 
@@ -265,7 +285,10 @@ def test_get_tag(create_plain_run: tuple[sv_run.Run, dict]) -> None:
     _, run_data = create_plain_run
     client = svc.Client()
     attempts: int = 0
-    while not any(tag.name == run_data["tags"][-1] for _, tag in client.get_tags()) and attempts < 10:
+    while (
+        not any(tag.name == run_data["tags"][-1] for _, tag in client.get_tags())
+        and attempts < 10
+    ):
         time.sleep(1)
         attempts += 1
 
@@ -276,7 +299,12 @@ def test_get_tag(create_plain_run: tuple[sv_run.Run, dict]) -> None:
 @pytest.mark.client
 def test_run_deletion() -> None:
     run = sv_run.Run()
-    run.init(name="test_run_deletion", folder="/simvue_unit_testing", tags=["test_run_deletion"], retention_period="1 min")
+    run.init(
+        name="test_run_deletion",
+        folder="/simvue_unit_testing",
+        tags=["test_run_deletion"],
+        retention_period="1 min",
+    )
     run.log_metrics({"x": 2})
     run.close()
     client = svc.Client()
@@ -289,23 +317,39 @@
 def test_runs_deletion() -> None:
     _runs = [sv_run.Run() for _ in range(5)]
     for i, run in enumerate(_runs):
-        run.init(name="test_runs_deletion", folder="/simvue_unit_testing/runs_batch", tags=["test_runs_deletion"], retention_period="1 min")
+        run.init(
+            name="test_runs_deletion",
+            folder="/simvue_unit_testing/runs_batch",
+            tags=["test_runs_deletion"],
+            retention_period="1 min",
+        )
         run.log_metrics({"x": i})
     client = svc.Client()
     assert len(client.delete_runs("/simvue_unit_testing/runs_batch")) > 0
     for run in _runs:
         with pytest.raises(ObjectNotFoundError):
-            client.get_run(run.id)
+            client.get_run(run.id)
 
 
 @pytest.mark.client
-def test_get_tags(create_plain_run: tuple[sv_run.Run, dict]) -> None:
-    run, run_data = create_plain_run
-    tags = run_data["tags"]
-    run.close()
+def test_get_tags() -> None:
+    _uuid = f"{uuid.uuid4()}".split("-")[0]
+    tags = ["simvue_unit_testing", "test_get_tags", "testing", _uuid]
+
+    with sv_run.Run() as run:
+        run.init(
+            "test_get_tags",
+            folder=f"/simvue_unit_testing/{_uuid}",
+            tags=tags,
+            retention_period="2 mins"
+        )
+
     client = svc.Client()
     attempts = 0
-    while not all(f in [t.name for _, t in client.get_tags()] for f in tags) and attempts < 10:
+    while (
+        not all(f in [t.name for _, t in client.get_tags()] for f in tags)
+        and attempts < 10
+    ):
         time.sleep(1)
         attempts += 1
 
@@ -317,11 +361,23 @@ def test_get_tags(create_plain_run: tuple[sv_run.Run, dict]) -> None:
 def test_folder_deletion() -> None:
     run = sv_run.Run()
     _temp_folder_id: str = f"{uuid.uuid4()}".split()[0]
-    run.init(name="test_folder_deletion", folder=f"/simvue_unit_testing/{_temp_folder_id}", tags=["test_folder_deletion"], retention_period="1 min")
+    run.init(
+        name="test_folder_deletion",
+        folder=f"/simvue_unit_testing/{_temp_folder_id}",
+        tags=["test_folder_deletion"],
+        retention_period="1 min",
+    )
     run.close()
     client = svc.Client()
     # This test is called last, one run created so expect length 1
-    assert len(client.delete_folder(f"/simvue_unit_testing/{_temp_folder_id}", remove_runs=True)) == 1
+    assert (
+        len(
+            client.delete_folder(
+                f"/simvue_unit_testing/{_temp_folder_id}", remove_runs=True
+            )
+        )
+        == 1
+    )
 
     # If the folder has been deleted then an ObjectNotFoundError should be raised
     assert not client.get_folder(f"/simvue_unit_testing/{_temp_folder_id}")
@@ -330,26 +386,34 @@ def test_folder_deletion() -> None:
 
 
 @pytest.mark.client
-def test_run_folder_metadata_find(create_plain_run: tuple[sv_run.Run, dict]) -> None:
-    run, run_data = create_plain_run
-    rand_val = random.randint(0, 1000)
-    run.set_folder_details(metadata={'atest': rand_val})
-    run.close()
-    time.sleep(1.0)
+def test_run_folder_metadata_find() -> None:
+    _uuid: str = f"{uuid.uuid4()}".split()[0]
+    with sv_run.Run() as run:
+        run.init(
+            "test_run_folder_metadata_find",
+            tags=["test_run_folder_metadata_find", "testing"],
+            folder=(_folder := f"/simvue_unit_testing/{_uuid}"),
+            retention_period="2 mins"
+        )
+        rand_val = random.randint(0, 1000)
+        run.set_folder_details(metadata={"atest": rand_val})
     client = svc.Client()
-    data = client.get_folders(filters=[f'metadata.atest == {rand_val}'])
+    data = client.get_folders(filters=[f"metadata.atest == {rand_val}"])
 
-    assert run_data["folder"] in [i.path for _, i in data]
+    assert _folder in [i.path for _, i in data]
 
 
 @pytest.mark.client
 def test_tag_deletion() -> None:
-    run = sv_run.Run()
-    run.init(name="test_folder_deletion", folder="/simvue_unit_testing", tags=["test_tag_deletion"], retention_period="1 min")
-    run.close()
-    unique_id = f"{uuid.uuid4()}".split("-")[0]
-    run.update_tags([(tag_str := f"delete_me_{unique_id}")])
-    run.close()
+    with sv_run.Run() as run:
+        unique_id = f"{uuid.uuid4()}".split("-")[0]
+        run.init(
+            name="test_folder_deletion",
+            folder=f"/simvue_unit_testing/{unique_id}",
+            tags=["test_tag_deletion"],
+            retention_period="1 min",
+        )
+        run.update_tags([(tag_str := f"delete_me_{unique_id}")])
     client = svc.Client()
     tags = client.get_tags()
     client.delete_run(run.id)
@@ -398,7 +462,9 @@ def test_multiple_metric_retrieval(
 
 @pytest.mark.client
 def test_alert_deletion() -> None:
-    _alert = sv_api_obj.UserAlert.new(name="test_alert", notification="none", description=None)
+    _alert = sv_api_obj.UserAlert.new(
+        name="test_alert", notification="none", description=None
+    )
     _alert.commit()
     _client = svc.Client()
     _client.delete_alert(alert_id=_alert.id)
@@ -420,4 +486,3 @@ def test_abort_run(speedy_heartbeat, create_plain_run: tuple[sv_run.Run, dict])
         _attempts += 1
         if _attempts >= 10:
             raise AssertionError("Failed to terminate run.")
-