
Commit 4551b7a

Merge branch 'kzscisoft/fix-folder-deletion-inconsistencies' into kzscisoft/test-fixes
2 parents 98ad8cd + 17afc8c

File tree

7 files changed, +424 -314 lines changed


poetry.lock

Lines changed: 307 additions & 248 deletions
Some generated files are not rendered by default.

simvue/api/objects/base.py

Lines changed: 4 additions & 0 deletions

@@ -412,6 +412,10 @@ def get(
                 f"Expected key 'data' for retrieval of {_class_instance.__class__.__name__.lower()}s"
             )
 
+        # If data is an empty list
+        if not _data:
+            return
+
         for entry in _data:
            _id = entry["id"]
            yield _id, cls(_read_only=True, identifier=_id, _local=True, **entry)
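
The guard above makes the object retrieval generator return immediately when the server reports an empty "data" list, so callers simply receive no items instead of iterating over nothing. A minimal caller-side sketch (the filter value is hypothetical and a reachable Simvue server is assumed):

```python
# Minimal sketch: with the empty-list guard, a query that matches nothing
# yields no items, so next(..., None) works as a clean "not found" check.
import json

from simvue.api.objects import Folder

_folders = Folder.get(filters=json.dumps(["path == /no/such/folder"]))
if next(_folders, None) is None:
    print("No folders matched the filter")
```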

simvue/client.py

Lines changed: 17 additions & 10 deletions

@@ -32,7 +32,16 @@
 from .models import FOLDER_REGEX, NAME_REGEX
 from .config.user import SimvueConfiguration
 from .api.request import get_json_from_response
-from .api.objects import Run, Folder, Tag, Artifact, Alert, FileArtifact, ObjectArtifact
+from .api.objects import (
+    Run,
+    Folder,
+    Tag,
+    Artifact,
+    Alert,
+    FileArtifact,
+    ObjectArtifact,
+    get_folder_from_path,
+)
 
 
 CONCURRENT_DOWNLOADS = 10
@@ -429,6 +438,9 @@ def delete_folder(
             delete_runs=remove_runs, recursive=recursive, runs_only=False
         )
 
+        if folder_path not in _response.get("folders", []):
+            raise RuntimeError("Deletion of folder failed, server returned mismatch.")
+
         return _response.get("runs", [])
 
     @prettify_pydantic
@@ -665,17 +677,12 @@ def get_folder(
         RuntimeError
            if there was a failure when retrieving information from the server
        """
-        _folders: typing.Generator[tuple[str, Folder], None, None] = Folder.get(
-            filters=json.dumps([f"path == {folder_path}"])
-        )  # type: ignore
-
         try:
-            _, _folder = next(_folders)
-            if not read_only:
-                _folder.read_only(read_only)
-            return _folder
-        except StopIteration:
+            _folder = get_folder_from_path(path=folder_path)
+        except ObjectNotFoundError:
             return None
+        _folder.read_only(is_read_only=read_only)
+        return _folder
 
     @pydantic.validate_call
     def get_folders(
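
Two behavioural changes here: delete_folder now cross-checks the server response and raises RuntimeError if the requested path is not listed among the deleted folders, and get_folder delegates to get_folder_from_path, returning None when the folder does not exist. A hedged usage sketch (the folder path is illustrative and a configured Simvue server is assumed):

```python
# Hedged sketch, assuming a configured Simvue server and valid credentials.
from simvue.client import Client

client = Client()

folder = client.get_folder("/simvue_unit_testing/example")  # None if absent
if folder is None:
    print("Folder not found")
else:
    # Returns the runs removed alongside the folder; raises RuntimeError if
    # the server response does not confirm deletion of this path.
    deleted_runs = client.delete_folder(
        "/simvue_unit_testing/example", remove_runs=True
    )
    print(f"Removed folder and {len(deleted_runs)} runs")
```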

simvue/run.py

Lines changed: 1 addition & 1 deletion

@@ -756,7 +756,7 @@ def init(
 
         if self._user_config.run.mode == "online":
             click.secho(
-                f"[simvue] Run {self._sv_obj.name} created",
+                f"[simvue] Run {self.name} created",
                 bold=self._term_color,
                 fg="green" if self._term_color else None,
             )

tests/conftest.py

Lines changed: 1 addition & 1 deletion

@@ -186,7 +186,7 @@ def create_plain_run_offline(request,prevent_script_exit,monkeypatch) -> typing.
     clear_out_files()
 
 
-@pytest.fixture
+@pytest.fixture(scope="function")
 def create_run_object(mocker: pytest_mock.MockFixture) -> sv_api_obj.Run:
     def testing_exit(status: int) -> None:
         raise SystemExit(status)
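
Note: scope="function" is pytest's default fixture scope, so the change above makes the per-test lifetime of create_run_object explicit rather than altering behaviour.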

tests/functional/test_client.py

Lines changed: 93 additions & 53 deletions

@@ -15,6 +15,7 @@
 import simvue.api.objects as sv_api_obj
 from simvue.api.objects.alert.base import AlertBase
 
+
 @pytest.mark.dependency
 @pytest.mark.client
 def test_get_events(create_test_run: tuple[sv_run.Run, dict]) -> None:
@@ -24,12 +25,8 @@ def test_get_events(create_test_run: tuple[sv_run.Run, dict]) -> None:
 
 @pytest.mark.dependency
 @pytest.mark.client
-@pytest.mark.parametrize(
-    "from_run", (True, False), ids=("from_run", "all_runs")
-)
-@pytest.mark.parametrize(
-    "names_only", (True, False), ids=("names_only", "all_details")
-)
+@pytest.mark.parametrize("from_run", (True, False), ids=("from_run", "all_runs"))
+@pytest.mark.parametrize("names_only", (True, False), ids=("names_only", "all_details"))
 @pytest.mark.parametrize(
     "critical_only", (True, False), ids=("critical_only", "all_states")
 )
@@ -43,42 +40,44 @@ def test_get_alerts(
     run_id = run.id
     unique_id = f"{uuid.uuid4()}".split("-")[0]
     _id_1 = run.create_user_alert(
-        name=f"user_alert_1_{unique_id}",
+        name=f"user_alert_1_{unique_id}",
     )
     run.create_user_alert(
-        name=f"user_alert_2_{unique_id}",
-    )
-    run.create_user_alert(
-        name=f"user_alert_3_{unique_id}",
-        attach_to_run=False
+        name=f"user_alert_2_{unique_id}",
     )
+    run.create_user_alert(name=f"user_alert_3_{unique_id}", attach_to_run=False)
     run.log_alert(identifier=_id_1, state="critical")
     time.sleep(2)
     run.close()
-
+
     client = svc.Client()
 
     if critical_only and not from_run:
         with pytest.raises(RuntimeError) as e:
-            _alerts = client.get_alerts(critical_only=critical_only, names_only=names_only)
-        assert "critical_only is ambiguous when returning alerts with no run ID specified." in str(e.value)
+            _alerts = client.get_alerts(
+                critical_only=critical_only, names_only=names_only
+            )
+        assert (
+            "critical_only is ambiguous when returning alerts with no run ID specified."
+            in str(e.value)
+        )
     else:
         sorting = None if run_id else [("name", True), ("created", True)]
         _alerts = client.get_alerts(
             run_id=run_id if from_run else None,
             critical_only=critical_only,
             names_only=names_only,
-            sort_by_columns=sorting
+            sort_by_columns=sorting,
         )
-
+
         if names_only:
             assert all(isinstance(item, str) for item in _alerts)
         else:
-            assert all(isinstance(item, AlertBase) for item in _alerts)
+            assert all(isinstance(item, AlertBase) for item in _alerts)
             _alerts = [alert.name for alert in _alerts]
-
+
         assert f"user_alert_1_{unique_id}" in _alerts
-
+
         if not from_run:
             assert len(_alerts) > 2
             assert f"user_alert_3_{unique_id}" in _alerts
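
The reformatted test above still exercises the same contract: Client.get_alerts only accepts critical_only when a run_id is supplied, and raises RuntimeError otherwise. A minimal sketch of that call pattern (the run ID is a placeholder and a reachable server is assumed):

```python
# Minimal sketch of the get_alerts contract exercised by test_get_alerts.
from simvue.client import Client

client = Client()

try:
    client.get_alerts(critical_only=True, names_only=True)  # no run_id given
except RuntimeError as err:
    # "critical_only is ambiguous when returning alerts with no run ID specified."
    print(err)

# With a run identifier the same flags are valid ("abc123" is a placeholder):
alert_names = client.get_alerts(run_id="abc123", critical_only=True, names_only=True)
```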
@@ -90,6 +89,7 @@ def test_get_alerts(
             assert len(_alerts) == 2
             assert f"user_alert_2_{unique_id}" in _alerts
 
+
 @pytest.mark.dependency
 @pytest.mark.client
 def test_get_run_id_from_name(create_test_run: tuple[sv_run.Run, dict]) -> None:
@@ -104,12 +104,8 @@ def test_get_run_id_from_name(create_test_run: tuple[sv_run.Run, dict]) -> None:
 @pytest.mark.client
 @pytest.mark.parametrize(
     "aggregate,use_name_labels",
-    [
-        (True, False),
-        (False, False),
-        (False, True)
-    ],
-    ids=("aggregate", "complete_ids", "complete_labels")
+    [(True, False), (False, False), (False, True)],
+    ids=("aggregate", "complete_ids", "complete_labels"),
 )
 def test_get_metric_values(
     create_test_run: tuple[sv_run.Run, dict], aggregate: bool, use_name_labels: bool
@@ -130,9 +126,9 @@ def test_get_metric_values(
     assert create_test_run[1]["metrics"][0] in _metrics_dict.keys()
     if aggregate:
         _value_types = {i[1] for i in _first_entry}
-        assert all(
-            i in _value_types for i in ("average", "min", "max")
-        ), f"Expected ('average', 'min', 'max') in {_value_types}"
+        assert all(i in _value_types for i in ("average", "min", "max")), (
+            f"Expected ('average', 'min', 'max') in {_value_types}"
+        )
     elif not use_name_labels:
         _runs = {i[1] for i in _first_entry}
         assert create_test_run[1]["run_id"] in _runs
@@ -157,12 +153,17 @@ def test_plot_metrics(create_test_run: tuple[sv_run.Run, dict]) -> None:
 @pytest.mark.dependency
 @pytest.mark.client
 @pytest.mark.parametrize(
-    "sorting", ([("metadata.test_identifier", True)], [("name", True), ("created", True)], None),
-    ids=("sorted-metadata", "sorted-name-created", None)
+    "sorting",
+    ([("metadata.test_identifier", True)], [("name", True), ("created", True)], None),
+    ids=("sorted-metadata", "sorted-name-created", None),
 )
-def test_get_artifacts_entries(create_test_run: tuple[sv_run.Run, dict], sorting: list[tuple[str, bool]] | None) -> None:
+def test_get_artifacts_entries(
+    create_test_run: tuple[sv_run.Run, dict], sorting: list[tuple[str, bool]] | None
+) -> None:
     client = svc.Client()
-    assert dict(client.list_artifacts(create_test_run[1]["run_id"], sort_by_columns=sorting))
+    assert dict(
+        client.list_artifacts(create_test_run[1]["run_id"], sort_by_columns=sorting)
+    )
     assert client.get_artifact(create_test_run[1]["run_id"], name="test_attributes")
 
 
@@ -180,7 +181,9 @@ def test_get_artifact_as_file(
             name=_file_name,
             output_dir=tempd,
         )
-        assert pathlib.Path(tempd).joinpath(_file_name).exists(), f"Failed to download '{_file_name}'"
+        assert pathlib.Path(tempd).joinpath(_file_name).exists(), (
+            f"Failed to download '{_file_name}'"
+        )
 
 
 @pytest.mark.dependency
@@ -196,7 +199,7 @@ def test_get_artifacts_as_files(
             create_test_run[1]["run_id"], category=category, output_dir=tempd
         )
         files = [os.path.basename(i) for i in glob.glob(os.path.join(tempd, "*"))]
-
+
         if not category:
             expected_files = ["file_1", "file_2", "file_3"]
         elif category == "input":
@@ -205,7 +208,7 @@ def test_get_artifacts_as_files(
             expected_files = ["file_2"]
         elif category == "code":
             expected_files = ["file_3"]
-
+
         for file in ["file_1", "file_2", "file_3"]:
             if file in expected_files:
                 assert create_test_run[1][file] in files
@@ -222,12 +225,18 @@ def test_get_artifacts_as_files(
         ("dataframe", [("created", True), ("started", True)]),
         ("objects", [("metadata.test_identifier", True)]),
     ],
-    ids=("dict-unsorted", "dataframe-datesorted", "objects-metasorted")
+    ids=("dict-unsorted", "dataframe-datesorted", "objects-metasorted"),
 )
-def test_get_runs(create_test_run: tuple[sv_run.Run, dict], output_format: str, sorting: list[tuple[str, bool]] | None) -> None:
+def test_get_runs(
+    create_test_run: tuple[sv_run.Run, dict],
+    output_format: str,
+    sorting: list[tuple[str, bool]] | None,
+) -> None:
     client = svc.Client()
 
-    _result = client.get_runs(filters=[], output_format=output_format, count_limit=10, sort_by_columns=sorting)
+    _result = client.get_runs(
+        filters=[], output_format=output_format, count_limit=10, sort_by_columns=sorting
+    )
 
     if output_format == "dataframe":
         assert not _result.empty
@@ -245,10 +254,13 @@ def test_get_run(create_test_run: tuple[sv_run.Run, dict]) -> None:
 @pytest.mark.dependency
 @pytest.mark.client
 @pytest.mark.parametrize(
-    "sorting", (None, [("metadata.test_identifier", True), ("path", True)], [("modified", False)]),
-    ids=("no-sort", "sort-path-metadata", "sort-modified")
+    "sorting",
+    (None, [("metadata.test_identifier", True), ("path", True)], [("modified", False)]),
+    ids=("no-sort", "sort-path-metadata", "sort-modified"),
 )
-def test_get_folders(create_test_run: tuple[sv_run.Run, dict], sorting: list[tuple[str, bool]] | None) -> None:
+def test_get_folders(
+    create_test_run: tuple[sv_run.Run, dict], sorting: list[tuple[str, bool]] | None
+) -> None:
     client = svc.Client()
     assert (folders := client.get_folders(sort_by_columns=sorting))
     _id, _folder = next(folders)
@@ -277,7 +289,12 @@ def test_get_tag(create_plain_run: tuple[sv_run.Run, dict]) -> None:
 @pytest.mark.client
 def test_run_deletion() -> None:
     run = sv_run.Run()
-    run.init(name="test_run_deletion", folder="/simvue_unit_testing", tags=["test_run_deletion"], retention_period="1 min")
+    run.init(
+        name="test_run_deletion",
+        folder="/simvue_unit_testing",
+        tags=["test_run_deletion"],
+        retention_period="1 min",
+    )
     run.log_metrics({"x": 2})
     run.close()
     client = svc.Client()
@@ -291,13 +308,18 @@ def test_run_deletion() -> None:
 def test_runs_deletion() -> None:
     _runs = [sv_run.Run() for _ in range(5)]
     for i, run in enumerate(_runs):
-        run.init(name="test_runs_deletion", folder="/simvue_unit_testing/runs_batch", tags=["test_runs_deletion"], retention_period="1 min")
+        run.init(
+            name="test_runs_deletion",
+            folder="/simvue_unit_testing/runs_batch",
+            tags=["test_runs_deletion"],
+            retention_period="1 min",
+        )
         run.log_metrics({"x": i})
     client = svc.Client()
     assert len(client.delete_runs("/simvue_unit_testing/runs_batch")) > 0
     for run in _runs:
         with pytest.raises(ObjectNotFoundError):
-            client.get_run(run.id)
+            client.get_run(run.id)
 
 
 @pytest.mark.dependency
@@ -316,11 +338,24 @@ def test_get_tags(create_plain_run: tuple[sv_run.Run, dict]) -> None:
 @pytest.mark.client
 def test_folder_deletion() -> None:
     run = sv_run.Run()
-    run.init(name="test_folder_deletion", folder="/simvue_unit_testing/delete_me", tags=["test_folder_deletion"], retention_period="1 min")
+    _temp_folder_id: str = f"{uuid.uuid4()}".split()[0]
+    run.init(
+        name="test_folder_deletion",
+        folder=f"/simvue_unit_testing/{_temp_folder_id}",
+        tags=["test_folder_deletion"],
+        retention_period="1 min",
+    )
     run.close()
     client = svc.Client()
     # This test is called last, one run created so expect length 1
-    assert len(client.delete_folder("/simvue_unit_testing/delete_me", remove_runs=True)) == 1
+    assert (
+        len(
+            client.delete_folder(
+                f"/simvue_unit_testing/{_temp_folder_id}", remove_runs=True
+            )
+        )
+        == 1
+    )
     time.sleep(10)
     with pytest.raises(ObjectNotFoundError):
         client.get_folder("/simvue_unit_testing/delete_me")
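
test_folder_deletion now writes into a uniquely named folder per invocation instead of reusing a fixed /simvue_unit_testing/delete_me path, which avoids collisions between repeated or concurrent test sessions. The same pattern in isolation (the folder prefix is illustrative, the sv_run alias is assumed to map to simvue.run as in the test module, and a configured server is assumed):

```python
# Sketch of the unique-folder pattern; mirrors the unique_id idiom used
# elsewhere in this test module.
import uuid

import simvue.run as sv_run

_suffix = f"{uuid.uuid4()}".split("-")[0]  # short random suffix
run = sv_run.Run()
run.init(
    name="example_run",
    folder=f"/simvue_unit_testing/{_suffix}",
    tags=["example"],
    retention_period="1 min",
)
run.close()
```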
@@ -332,19 +367,24 @@ def test_folder_deletion() -> None:
 def test_run_folder_metadata_find(create_plain_run: tuple[sv_run.Run, dict]) -> None:
     run, run_data = create_plain_run
     rand_val = random.randint(0, 1000)
-    run.set_folder_details(metadata={'atest': rand_val})
+    run.set_folder_details(metadata={"atest": rand_val})
     run.close()
     time.sleep(1.0)
     client = svc.Client()
-    data = client.get_folders(filters=[f'metadata.atest == {rand_val}'])
+    data = client.get_folders(filters=[f"metadata.atest == {rand_val}"])
 
     assert run_data["folder"] in [i.path for _, i in data]
 
 
 @pytest.mark.client
 def test_tag_deletion() -> None:
     run = sv_run.Run()
-    run.init(name="test_folder_deletion", folder="/simvue_unit_testing/delete_me", tags=["test_tag_deletion"], retention_period="1 min")
+    run.init(
+        name="test_folder_deletion",
+        folder="/simvue_unit_testing",
+        tags=["test_tag_deletion"],
+        retention_period="1 min",
+    )
     run.close()
     unique_id = f"{uuid.uuid4()}".split("-")[0]
     run.update_tags([(tag_str := f"delete_me_{unique_id}")])
@@ -398,7 +438,9 @@ def test_multiple_metric_retrieval(
 
 @pytest.mark.client
 def test_alert_deletion() -> None:
-    _alert = sv_api_obj.UserAlert.new(name="test_alert", notification="none", description=None)
+    _alert = sv_api_obj.UserAlert.new(
+        name="test_alert", notification="none", description=None
+    )
     _alert.commit()
     _client = svc.Client()
     _client.delete_alert(alert_id=_alert.id)
@@ -422,5 +464,3 @@ def test_abort_run(speedy_heartbeat, create_plain_run: tuple[sv_run.Run, dict])
     except AssertionError:
         time.sleep(2)
     assert run._status == "terminated"
-
-
