Commit 4876bf1

🧪 Further test improvements and formatting
1 parent 9566430 commit 4876bf1

3 files changed (+155, −82 lines)


tests/functional/test_client.py

Lines changed: 138 additions & 73 deletions
@@ -17,68 +17,72 @@
 import simvue.api.objects as sv_api_obj
 from simvue.api.objects.alert.base import AlertBase
 
+
 @pytest.mark.client
 def test_get_events(create_test_run: tuple[sv_run.Run, dict]) -> None:
     client = svc.Client()
     assert client.get_events(run_id=create_test_run[1]["run_id"])
 
 
 @pytest.mark.client
-@pytest.mark.parametrize(
-    "from_run", (True, False), ids=("from_run", "all_runs")
-)
-@pytest.mark.parametrize(
-    "names_only", (True, False), ids=("names_only", "all_details")
-)
+@pytest.mark.parametrize("from_run", (True, False), ids=("from_run", "all_runs"))
+@pytest.mark.parametrize("names_only", (True, False), ids=("names_only", "all_details"))
 @pytest.mark.parametrize(
     "critical_only", (True, False), ids=("critical_only", "all_states")
 )
 def test_get_alerts(
-    create_plain_run: tuple[sv_run.Run, dict],
     from_run: bool,
     names_only: bool,
     critical_only: bool,
 ) -> None:
-    run, run_data = create_plain_run
-    run_id = run.id
     unique_id = f"{uuid.uuid4()}".split("-")[0]
-    _id_1 = run.create_user_alert(
-        name=f"user_alert_1_{unique_id}",
+    run = sv_run.Run()
+    run.init(
+        "test_get_alerts",
+        folder=f"/simvue_unit_testing/{unique_id}",
+        tags=["test_get_alerts"],
+        retention_period="2 mins",
     )
-    run.create_user_alert(
-        name=f"user_alert_2_{unique_id}",
+    run_id = run.id
+    _id_1 = run.create_user_alert(
+        name=f"user_alert_1_{unique_id}",
     )
     run.create_user_alert(
-        name=f"user_alert_3_{unique_id}",
-        attach_to_run=False
+        name=f"user_alert_2_{unique_id}",
     )
+    run.create_user_alert(name=f"user_alert_3_{unique_id}", attach_to_run=False)
     run.log_alert(identifier=_id_1, state="critical")
     time.sleep(2)
     run.close()
-
+
     client = svc.Client()
 
     if critical_only and not from_run:
         with pytest.raises(RuntimeError) as e:
-            _alerts = client.get_alerts(critical_only=critical_only, names_only=names_only)
-        assert "critical_only is ambiguous when returning alerts with no run ID specified." in str(e.value)
+            _alerts = client.get_alerts(
+                critical_only=critical_only, names_only=names_only
+            )
+        assert (
+            "critical_only is ambiguous when returning alerts with no run ID specified."
+            in str(e.value)
+        )
     else:
         sorting = None if run_id else [("name", True), ("created", True)]
         _alerts = client.get_alerts(
             run_id=run_id if from_run else None,
             critical_only=critical_only,
             names_only=names_only,
-            sort_by_columns=sorting
+            sort_by_columns=sorting,
         )
-
+
     if names_only:
         assert all(isinstance(item, str) for item in _alerts)
     else:
-        assert all(isinstance(item, AlertBase) for item in _alerts)
+        assert all(isinstance(item, AlertBase) for item in _alerts)
         _alerts = [alert.name for alert in _alerts]
-
+
     assert f"user_alert_1_{unique_id}" in _alerts
-
+
     if not from_run:
         assert len(_alerts) > 2
         assert f"user_alert_3_{unique_id}" in _alerts
@@ -90,6 +94,7 @@ def test_get_alerts(
         assert len(_alerts) == 2
         assert f"user_alert_2_{unique_id}" in _alerts
 
+
 @pytest.mark.client
 def test_get_run_id_from_name(create_test_run: tuple[sv_run.Run, dict]) -> None:
     client = svc.Client()
@@ -102,12 +107,8 @@ def test_get_run_id_from_name(create_test_run: tuple[sv_run.Run, dict]) -> None:
 @pytest.mark.client
 @pytest.mark.parametrize(
     "aggregate,use_name_labels",
-    [
-        (True, False),
-        (False, False),
-        (False, True)
-    ],
-    ids=("aggregate", "complete_ids", "complete_labels")
+    [(True, False), (False, False), (False, True)],
+    ids=("aggregate", "complete_ids", "complete_labels"),
 )
 def test_get_metric_values(
     create_test_run: tuple[sv_run.Run, dict], aggregate: bool, use_name_labels: bool
@@ -128,9 +129,9 @@ def test_get_metric_values(
     assert create_test_run[1]["metrics"][0] in _metrics_dict.keys()
     if aggregate:
         _value_types = {i[1] for i in _first_entry}
-        assert all(
-            i in _value_types for i in ("average", "min", "max")
-        ), f"Expected ('average', 'min', 'max') in {_value_types}"
+        assert all(i in _value_types for i in ("average", "min", "max")), (
+            f"Expected ('average', 'min', 'max') in {_value_types}"
+        )
     elif not use_name_labels:
         _runs = {i[1] for i in _first_entry}
         assert create_test_run[1]["run_id"] in _runs
@@ -153,12 +154,17 @@ def test_plot_metrics(create_test_run: tuple[sv_run.Run, dict]) -> None:
 
 @pytest.mark.client
 @pytest.mark.parametrize(
-    "sorting", ([("metadata.test_identifier", True)], [("name", True), ("created", True)], None),
-    ids=("sorted-metadata", "sorted-name-created", None)
+    "sorting",
+    ([("metadata.test_identifier", True)], [("name", True), ("created", True)], None),
+    ids=("sorted-metadata", "sorted-name-created", None),
 )
-def test_get_artifacts_entries(create_test_run: tuple[sv_run.Run, dict], sorting: list[tuple[str, bool]] | None) -> None:
+def test_get_artifacts_entries(
+    create_test_run: tuple[sv_run.Run, dict], sorting: list[tuple[str, bool]] | None
+) -> None:
     client = svc.Client()
-    assert dict(client.list_artifacts(create_test_run[1]["run_id"], sort_by_columns=sorting))
+    assert dict(
+        client.list_artifacts(create_test_run[1]["run_id"], sort_by_columns=sorting)
+    )
     assert client.get_artifact(create_test_run[1]["run_id"], name="test_attributes")
 
 
@@ -175,7 +181,9 @@ def test_get_artifact_as_file(
             name=_file_name,
             output_dir=tempd,
         )
-        assert pathlib.Path(tempd).joinpath(_file_name).exists(), f"Failed to download '{_file_name}'"
+        assert pathlib.Path(tempd).joinpath(_file_name).exists(), (
+            f"Failed to download '{_file_name}'"
+        )
 
 
 @pytest.mark.client
@@ -190,7 +198,7 @@ def test_get_artifacts_as_files(
             create_test_run[1]["run_id"], category=category, output_dir=tempd
         )
         files = [os.path.basename(i) for i in glob.glob(os.path.join(tempd, "*"))]
-
+
         if not category:
             expected_files = ["file_1", "file_2", "file_3"]
         elif category == "input":
@@ -199,7 +207,7 @@ def test_get_artifacts_as_files(
             expected_files = ["file_2"]
         elif category == "code":
             expected_files = ["file_3"]
-
+
         for file in ["file_1", "file_2", "file_3"]:
             assert create_test_run[1][file] in files
@@ -215,12 +223,18 @@ def test_get_artifacts_as_files(
         ("dataframe", [("created", True), ("started", True)]),
         ("objects", [("metadata.test_identifier", True)]),
     ],
-    ids=("dict-unsorted", "dataframe-datesorted", "objects-metasorted")
+    ids=("dict-unsorted", "dataframe-datesorted", "objects-metasorted"),
 )
-def test_get_runs(create_test_run: tuple[sv_run.Run, dict], output_format: str, sorting: list[tuple[str, bool]] | None) -> None:
+def test_get_runs(
+    create_test_run: tuple[sv_run.Run, dict],
+    output_format: str,
+    sorting: list[tuple[str, bool]] | None,
+) -> None:
     client = svc.Client()
 
-    _result = client.get_runs(filters=[], output_format=output_format, count_limit=10, sort_by_columns=sorting)
+    _result = client.get_runs(
+        filters=[], output_format=output_format, count_limit=10, sort_by_columns=sorting
+    )
 
     if output_format == "dataframe":
         assert not _result.empty
@@ -236,10 +250,13 @@ def test_get_run(create_test_run: tuple[sv_run.Run, dict]) -> None:
 
 @pytest.mark.client
 @pytest.mark.parametrize(
-    "sorting", (None, [("metadata.test_identifier", True), ("path", True)], [("modified", False)]),
-    ids=("no-sort", "sort-path-metadata", "sort-modified")
+    "sorting",
+    (None, [("metadata.test_identifier", True), ("path", True)], [("modified", False)]),
+    ids=("no-sort", "sort-path-metadata", "sort-modified"),
 )
-def test_get_folders(create_test_run: tuple[sv_run.Run, dict], sorting: list[tuple[str, bool]] | None) -> None:
+def test_get_folders(
+    create_test_run: tuple[sv_run.Run, dict], sorting: list[tuple[str, bool]] | None
+) -> None:
     client = svc.Client()
     assert (folders := client.get_folders(sort_by_columns=sorting))
     _id, _folder = next(folders)
@@ -252,7 +269,10 @@ def test_get_metrics_names(create_test_run: tuple[sv_run.Run, dict]) -> None:
     client = svc.Client()
     attempts: int = 0
 
-    while not list(client.get_metrics_names(create_test_run[1]["run_id"])) and attempts < 10:
+    while (
+        not list(client.get_metrics_names(create_test_run[1]["run_id"]))
+        and attempts < 10
+    ):
         time.sleep(1)
         attempts += 1
 
@@ -265,7 +285,10 @@ def test_get_tag(create_plain_run: tuple[sv_run.Run, dict]) -> None:
     _, run_data = create_plain_run
     client = svc.Client()
     attempts: int = 0
-    while not any(tag.name == run_data["tags"][-1] for _, tag in client.get_tags()) and attempts < 10:
+    while (
+        not any(tag.name == run_data["tags"][-1] for _, tag in client.get_tags())
+        and attempts < 10
+    ):
         time.sleep(1)
         attempts += 1
 
@@ -276,7 +299,12 @@ def test_get_tag(create_plain_run: tuple[sv_run.Run, dict]) -> None:
 @pytest.mark.client
 def test_run_deletion() -> None:
     run = sv_run.Run()
-    run.init(name="test_run_deletion", folder="/simvue_unit_testing", tags=["test_run_deletion"], retention_period="1 min")
+    run.init(
+        name="test_run_deletion",
+        folder="/simvue_unit_testing",
+        tags=["test_run_deletion"],
+        retention_period="1 min",
+    )
     run.log_metrics({"x": 2})
     run.close()
     client = svc.Client()
@@ -289,23 +317,39 @@ def test_run_deletion() -> None:
 def test_runs_deletion() -> None:
     _runs = [sv_run.Run() for _ in range(5)]
     for i, run in enumerate(_runs):
-        run.init(name="test_runs_deletion", folder="/simvue_unit_testing/runs_batch", tags=["test_runs_deletion"], retention_period="1 min")
+        run.init(
+            name="test_runs_deletion",
+            folder="/simvue_unit_testing/runs_batch",
+            tags=["test_runs_deletion"],
+            retention_period="1 min",
+        )
         run.log_metrics({"x": i})
     client = svc.Client()
     assert len(client.delete_runs("/simvue_unit_testing/runs_batch")) > 0
     for run in _runs:
         with pytest.raises(ObjectNotFoundError):
-            client.get_run(run.id)
+            client.get_run(run.id)
 
 
 @pytest.mark.client
-def test_get_tags(create_plain_run: tuple[sv_run.Run, dict]) -> None:
-    run, run_data = create_plain_run
-    tags = run_data["tags"]
-    run.close()
+def test_get_tags() -> None:
+    _uuid = f"{uuid.uuid4()}".split("-")[0]
+    tags = ["simvue_unit_testing", "test_get_tags", "testing", _uuid]
+
+    with sv_run.Run() as run:
+        run.init(
+            "test_get_tags",
+            folder=f"/simvue_unit_testing/{_uuid}",
+            tags=tags,
+            retention_period="2 mins"
+        )
+
     client = svc.Client()
     attempts = 0
-    while not all(f in [t.name for _, t in client.get_tags()] for f in tags) and attempts < 10:
+    while (
+        not all(f in [t.name for _, t in client.get_tags()] for f in tags)
+        and attempts < 10
+    ):
         time.sleep(1)
         attempts += 1
 
@@ -317,11 +361,23 @@ def test_get_tags(create_plain_run: tuple[sv_run.Run, dict]) -> None:
 def test_folder_deletion() -> None:
     run = sv_run.Run()
     _temp_folder_id: str = f"{uuid.uuid4()}".split()[0]
-    run.init(name="test_folder_deletion", folder=f"/simvue_unit_testing/{_temp_folder_id}", tags=["test_folder_deletion"], retention_period="1 min")
+    run.init(
+        name="test_folder_deletion",
+        folder=f"/simvue_unit_testing/{_temp_folder_id}",
+        tags=["test_folder_deletion"],
+        retention_period="1 min",
+    )
     run.close()
     client = svc.Client()
     # This test is called last, one run created so expect length 1
-    assert len(client.delete_folder(f"/simvue_unit_testing/{_temp_folder_id}", remove_runs=True)) == 1
+    assert (
+        len(
+            client.delete_folder(
+                f"/simvue_unit_testing/{_temp_folder_id}", remove_runs=True
+            )
+        )
+        == 1
+    )
 
     # If the folder has been deleted then an ObjectNotFoundError should be raised
     assert not client.get_folder(f"/simvue_unit_testing/{_temp_folder_id}")
@@ -330,26 +386,34 @@ def test_folder_deletion() -> None:
 
 
 @pytest.mark.client
-def test_run_folder_metadata_find(create_plain_run: tuple[sv_run.Run, dict]) -> None:
-    run, run_data = create_plain_run
-    rand_val = random.randint(0, 1000)
-    run.set_folder_details(metadata={'atest': rand_val})
-    run.close()
-    time.sleep(1.0)
+def test_run_folder_metadata_find() -> None:
+    _uuid: str = f"{uuid.uuid4()}".split()[0]
+    with sv_run.Run() as run:
+        run.init(
+            "test_run_folder_metadata_find",
+            tags=["test_run_folder_metadata_find", "testing"],
+            folder=(_folder := f"/simvue_unit_testing/{_uuid}"),
+            retention_period="2 mins"
+        )
+        rand_val = random.randint(0, 1000)
+        run.set_folder_details(metadata={"atest": rand_val})
     client = svc.Client()
-    data = client.get_folders(filters=[f'metadata.atest == {rand_val}'])
+    data = client.get_folders(filters=[f"metadata.atest == {rand_val}"])
 
-    assert run_data["folder"] in [i.path for _, i in data]
+    assert _folder in [i.path for _, i in data]
 
 
 @pytest.mark.client
 def test_tag_deletion() -> None:
-    run = sv_run.Run()
-    run.init(name="test_folder_deletion", folder="/simvue_unit_testing", tags=["test_tag_deletion"], retention_period="1 min")
-    run.close()
-    unique_id = f"{uuid.uuid4()}".split("-")[0]
-    run.update_tags([(tag_str := f"delete_me_{unique_id}")])
-    run.close()
+    with sv_run.Run() as run:
+        unique_id = f"{uuid.uuid4()}".split("-")[0]
+        run.init(
+            name="test_folder_deletion",
+            folder=f"/simvue_unit_testing/{unique_id}",
+            tags=["test_tag_deletion"],
+            retention_period="1 min",
+        )
+        run.update_tags([(tag_str := f"delete_me_{unique_id}")])
     client = svc.Client()
     tags = client.get_tags()
     client.delete_run(run.id)
@@ -398,7 +462,9 @@ def test_multiple_metric_retrieval(
 
 @pytest.mark.client
 def test_alert_deletion() -> None:
-    _alert = sv_api_obj.UserAlert.new(name="test_alert", notification="none", description=None)
+    _alert = sv_api_obj.UserAlert.new(
+        name="test_alert", notification="none", description=None
+    )
     _alert.commit()
     _client = svc.Client()
     _client.delete_alert(alert_id=_alert.id)
@@ -420,4 +486,3 @@ def test_abort_run(speedy_heartbeat, create_plain_run: tuple[sv_run.Run, dict])
         _attempts += 1
         if _attempts >= 10:
             raise AssertionError("Failed to terminate run.")
-
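
The recurring change across this diff is that tests which previously relied on the create_plain_run fixture now create and initialise their own run inside a uniquely named folder under /simvue_unit_testing, with a short retention period so the test data expires on its own. A minimal sketch of that pattern is given below; the run name, tag and the exact module path behind the sv_run import alias are illustrative assumptions, not lines taken from this commit.

# Sketch of the self-contained run pattern this commit applies to several tests.
# The import path for the alias and the run name/tag are assumptions.
import uuid

import simvue.run as sv_run  # alias as used in tests/functional/test_client.py

unique_id = f"{uuid.uuid4()}".split("-")[0]
with sv_run.Run() as run:
    run.init(
        "example_client_test",  # hypothetical run name
        folder=f"/simvue_unit_testing/{unique_id}",  # unique folder per test
        tags=["example_client_test"],
        retention_period="2 mins",  # run data cleans itself up after the test
    )
    # ... exercise the simvue Client API against this run ...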