From 9b6e53b53414477aee42d50a021f3de56dab5255 Mon Sep 17 00:00:00 2001 From: Richard Shaw Date: Tue, 15 Aug 2023 22:52:24 -0700 Subject: [PATCH 1/8] feat(memh5): allow hints arg to force distributed dataset loading This extends the hints argument to a dictionary to allow it to force distributed loading of datasets with no specific `__memh5_distributed` markers. --- caput/memh5.py | 47 ++++++++++++++++++++++++++++++++++++----------- 1 file changed, 36 insertions(+), 11 deletions(-) diff --git a/caput/memh5.py b/caput/memh5.py index 47135131..121b8e54 100644 --- a/caput/memh5.py +++ b/caput/memh5.py @@ -49,6 +49,8 @@ """ from __future__ import annotations +from pathlib import Path +from typing import Any, Union, TYPE_CHECKING import datetime import json @@ -65,6 +67,10 @@ from . import fileformats, misc, mpiarray, mpiutil, tools +if TYPE_CHECKING: + from mpi4py import MPI + + logger = logging.getLogger(__name__) try: @@ -2863,18 +2869,22 @@ def _write_distributed_datasets(dest): def _distributed_group_from_file( - fname, - comm=None, - _=True, # usually `hints`, but hints do not do anything in this method - convert_dataset_strings=False, - convert_attribute_strings=True, - file_format=fileformats.HDF5, + fname: Union[str, Path], + comm: "MPI.Comm" = None, + hints: Union[bool, dict] = True, + convert_dataset_strings: bool = False, + convert_attribute_strings: bool = True, + file_format: type[fileformats.FileFormat] = fileformats.HDF5, **kwargs, ): - """Restore full tree from an HDF5 file or Zarr group into a distributed memh5 object. + """Restore full tree from an HDF5 or Zarr into a distributed memh5 object. A `selections=` parameter may be supplied as parts of 'kwargs'. See `_deep_group_copy' for a description. + + Hints may be a dictionary that can override the settings in the file itself. The + keys should be the path to the dataset and the value a dictionary with keys + `distributed` (boolean, required) and `axis` (integer, optional). 
""" # Create root group group = MemGroup(distributed=True, comm=comm) @@ -2882,6 +2892,12 @@ def _distributed_group_from_file( selections = kwargs.pop("selections", None) + # Fill the hints dict if set + hints_dict = {} + if isinstance(hints, dict): + hints_dict = hints + hints = True + # == Create some internal functions for doing the read == # Copy over attributes with a broadcast from rank = 0 def _copy_attrs_bcast(h5item, memitem, **kwargs): @@ -2912,11 +2928,20 @@ def _copy_from_file(h5group, memgroup, selections=None): # If dataset, create dataset else: + + dset_hints = hints_dict.get(key, {}) + + distributed = hints and ( + dset_hints.get("distributed", False) + or item.attrs.get("__memh5_distributed_dset", False) + ) # Check if we are in a distributed dataset - if ("__memh5_distributed_dset" in item.attrs) and item.attrs[ - "__memh5_distributed_dset" - ]: - distributed_axis = item.attrs.get("__memh5_distributed_axis", 0) + if distributed: + distributed_axis = ( + dset_hints["axis"] + if "axis" in dset_hints + else item.attrs.get("__memh5_distributed_axis", 0) + ) # Read from file into MPIArray pdata = mpiarray.MPIArray.from_file( From a98507726c57dba08bac46f94fbff606f1206cf3 Mon Sep 17 00:00:00 2001 From: Richard Shaw Date: Tue, 15 Aug 2023 23:47:45 -0700 Subject: [PATCH 2/8] fix(memh5): error with distributed loading of files with datasets in groups --- caput/memh5.py | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/caput/memh5.py b/caput/memh5.py index 121b8e54..74e9354f 100644 --- a/caput/memh5.py +++ b/caput/memh5.py @@ -50,7 +50,7 @@ from __future__ import annotations from pathlib import Path -from typing import Any, Union, TYPE_CHECKING +from typing import Any, Optional, Type, Union, TYPE_CHECKING import datetime import json @@ -2870,11 +2870,11 @@ def _write_distributed_datasets(dest): def _distributed_group_from_file( fname: Union[str, Path], - comm: "MPI.Comm" = None, + comm: Optional["MPI.Comm"] = None, hints: 
Union[bool, dict] = True, convert_dataset_strings: bool = False, convert_attribute_strings: bool = True, - file_format: type[fileformats.FileFormat] = fileformats.HDF5, + file_format: Type[fileformats.FileFormat] = fileformats.HDF5, **kwargs, ): """Restore full tree from an HDF5 or Zarr into a distributed memh5 object. @@ -2928,8 +2928,7 @@ def _copy_from_file(h5group, memgroup, selections=None): # If dataset, create dataset else: - - dset_hints = hints_dict.get(key, {}) + dset_hints = hints_dict.get(item.name, {}) distributed = hints and ( dset_hints.get("distributed", False) @@ -2945,8 +2944,8 @@ def _copy_from_file(h5group, memgroup, selections=None): # Read from file into MPIArray pdata = mpiarray.MPIArray.from_file( - h5group, - key, + fname, + item.name, axis=distributed_axis, comm=comm, sel=selection, From 929cd1d8be5396087be638ae0b2d0fe7ca7920e9 Mon Sep 17 00:00:00 2001 From: Richard Shaw Date: Wed, 16 Aug 2023 09:18:00 -0700 Subject: [PATCH 3/8] refactor(open_h5py_mpi): add type hints --- caput/misc.py | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/caput/misc.py b/caput/misc.py index b162fef5..c8a80200 100644 --- a/caput/misc.py +++ b/caput/misc.py @@ -2,9 +2,15 @@ import importlib import os +from pathlib import Path +from typing import TYPE_CHECKING, Optional, Union, overload +import h5py import numpy as np +if TYPE_CHECKING: + from mpi4py import MPI + def vectorize(**base_kwargs): """Improved vectorization decorator. @@ -182,6 +188,23 @@ def __get__(self, obj, objtype=None): return _listize_desc +@overload +def open_h5py_mpi( + f: Union[str, Path, h5py.File], + mode: str, + use_mpi: bool = True, + comm: Optional["MPI.Comm"] = None, +) -> h5py.File: + ... + + +@overload +def open_h5py_mpi( + f: h5py.Group, mode: str, use_mpi: bool = True, comm: Optional["MPI.Comm"] = None +) -> h5py.Group: + ... + + def open_h5py_mpi(f, mode, use_mpi=True, comm=None): """Ensure that we have an h5py File object. 
From aebc0cc083636321b04af02ade85c87a715d9147 Mon Sep 17 00:00:00 2001 From: Richard Shaw Date: Wed, 16 Aug 2023 10:00:07 -0700 Subject: [PATCH 4/8] chore: make readthedocs less strict for unreproducible failures --- .readthedocs.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.readthedocs.yml b/.readthedocs.yml index 929d2d4d..08994e77 100644 --- a/.readthedocs.yml +++ b/.readthedocs.yml @@ -14,7 +14,7 @@ build: # Build documentation in the docs/ directory with Sphinx sphinx: configuration: doc/conf.py - fail_on_warning: true + fail_on_warning: false python: install: From 6999778a0f54a684ed56f978b01c5dbb6578856f Mon Sep 17 00:00:00 2001 From: Richard Shaw Date: Tue, 30 Jan 2024 16:30:17 -0800 Subject: [PATCH 5/8] fix(memh5): do not pass hints for ondisk loads --- caput/memh5.py | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/caput/memh5.py b/caput/memh5.py index 74e9354f..c4e7e491 100644 --- a/caput/memh5.py +++ b/caput/memh5.py @@ -49,8 +49,6 @@ """ from __future__ import annotations -from pathlib import Path -from typing import Any, Optional, Type, Union, TYPE_CHECKING import datetime import json @@ -60,7 +58,8 @@ from ast import literal_eval from collections.abc import Mapping from copy import deepcopy -from typing import Any +from pathlib import Path +from typing import TYPE_CHECKING, Any import h5py import numpy as np @@ -1862,6 +1861,10 @@ def from_file( if distributed and file_format == fileformats.Zarr: lockfile = f"{file_}.sync" kwargs["synchronizer"] = zarr.ProcessSynchronizer(lockfile) + + # NOTE: hints is not supported for ondisk files, remove the argument in + # case it's been passed indirectly + kwargs.pop("hints", None) data = file_format.open(file_, **kwargs) toclose = file_format == fileformats.HDF5 @@ -2869,12 +2872,12 @@ def _write_distributed_datasets(dest): def _distributed_group_from_file( - fname: Union[str, Path], - comm: Optional["MPI.Comm"] = None, - hints: Union[bool, dict] = 
True, + fname: str | Path, + comm: MPI.Comm | None = None, + hints: bool | dict = True, convert_dataset_strings: bool = False, convert_attribute_strings: bool = True, - file_format: Type[fileformats.FileFormat] = fileformats.HDF5, + file_format: type[fileformats.FileFormat] = fileformats.HDF5, **kwargs, ): """Restore full tree from an HDF5 or Zarr into a distributed memh5 object. From 41496df365558b292fa8ccc2f59dd2c629d523b8 Mon Sep 17 00:00:00 2001 From: Richard Shaw Date: Tue, 30 Jan 2024 17:52:31 -0800 Subject: [PATCH 6/8] style: apply new black style --- caput/__init__.py | 1 + caput/cache.py | 1 + caput/config.py | 1 + caput/fileformats.py | 1 + caput/interferometry.py | 1 - caput/misc.py | 6 ++---- caput/mpiarray.py | 1 + caput/tests/test_mpiarray.py | 1 + caput/tests/test_pipeline.py | 1 + caput/tests/test_selection.py | 1 + caput/time.py | 1 + caput/tod.py | 8 +++++--- 12 files changed, 16 insertions(+), 8 deletions(-) diff --git a/caput/__init__.py b/caput/__init__.py index c811d8f7..e35cbc9b 100644 --- a/caput/__init__.py +++ b/caput/__init__.py @@ -17,6 +17,7 @@ tod weighted_median """ + from . 
import _version __version__ = _version.get_versions()["version"] diff --git a/caput/cache.py b/caput/cache.py index 20d8f855..1588228c 100644 --- a/caput/cache.py +++ b/caput/cache.py @@ -1,4 +1,5 @@ """Tools for caching expensive calculations.""" + import weakref import numpy as np diff --git a/caput/config.py b/caput/config.py index 1841bbb8..3e5081ad 100644 --- a/caput/config.py +++ b/caput/config.py @@ -49,6 +49,7 @@ Richard 40.0 Sooty """ + from __future__ import annotations from typing import TYPE_CHECKING diff --git a/caput/fileformats.py b/caput/fileformats.py index 9c01f45f..d134b856 100644 --- a/caput/fileformats.py +++ b/caput/fileformats.py @@ -1,4 +1,5 @@ """Interface for file formats supported by caput: HDF5 and Zarr.""" + import logging import os import shutil diff --git a/caput/interferometry.py b/caput/interferometry.py index 8032c6e8..734996ce 100644 --- a/caput/interferometry.py +++ b/caput/interferometry.py @@ -11,7 +11,6 @@ - :py:meth:`fringestop_phase` """ - import numpy as np diff --git a/caput/misc.py b/caput/misc.py index c8a80200..c57860bb 100644 --- a/caput/misc.py +++ b/caput/misc.py @@ -194,15 +194,13 @@ def open_h5py_mpi( mode: str, use_mpi: bool = True, comm: Optional["MPI.Comm"] = None, -) -> h5py.File: - ... +) -> h5py.File: ... @overload def open_h5py_mpi( f: h5py.Group, mode: str, use_mpi: bool = True, comm: Optional["MPI.Comm"] = None -) -> h5py.Group: - ... +) -> h5py.Group: ... def open_h5py_mpi(f, mode, use_mpi=True, comm=None): diff --git a/caput/mpiarray.py b/caput/mpiarray.py index c6264bc1..0fe6fcdc 100644 --- a/caput/mpiarray.py +++ b/caput/mpiarray.py @@ -236,6 +236,7 @@ intermediate pickling process, which can lead to malformed arrays. 
""" + import logging import os import time diff --git a/caput/tests/test_mpiarray.py b/caput/tests/test_mpiarray.py index 294ba1dd..7e94da2a 100644 --- a/caput/tests/test_mpiarray.py +++ b/caput/tests/test_mpiarray.py @@ -4,6 +4,7 @@ $ mpirun -np 4 python test_mpiarray.py """ + from typing import Union from packaging import version import pytest diff --git a/caput/tests/test_pipeline.py b/caput/tests/test_pipeline.py index a61fb32c..ccef8d63 100644 --- a/caput/tests/test_pipeline.py +++ b/caput/tests/test_pipeline.py @@ -1,4 +1,5 @@ """Test running the caput.pipeline.""" + from caput.tests import conftest diff --git a/caput/tests/test_selection.py b/caput/tests/test_selection.py index 66db5294..1d45d95a 100644 --- a/caput/tests/test_selection.py +++ b/caput/tests/test_selection.py @@ -1,4 +1,5 @@ """Serial version of the selection tests.""" + import pytest from pytest_lazyfixture import lazy_fixture import numpy as np diff --git a/caput/time.py b/caput/time.py index 0e9cb263..d2b3b708 100644 --- a/caput/time.py +++ b/caput/time.py @@ -117,6 +117,7 @@ .. _`IERS constants`: http://hpiers.obspm.fr/eop-pc/models/constants.html """ + import warnings from datetime import datetime diff --git a/caput/tod.py b/caput/tod.py index b122214f..e8edb136 100644 --- a/caput/tod.py +++ b/caput/tod.py @@ -390,9 +390,11 @@ def dataset_filter(d): # Just copy it. out.create_index_map( axis, - memh5.ensure_unicode(index_map) - if convert_dataset_strings - else index_map, + ( + memh5.ensure_unicode(index_map) + if convert_dataset_strings + else index_map + ), ) memh5.copyattrs(first_data.index_attrs[axis], out.index_attrs[axis]) From 443dd4951d5e1d2d799433c7f263791596cf245e Mon Sep 17 00:00:00 2001 From: Richard Shaw Date: Mon, 5 Feb 2024 16:44:27 -0800 Subject: [PATCH 7/8] test: ensure tests work with pytest 8.0 This swaps from using the pytest-lazy-fixture plugin to the pytest-lazy-fixtures plugin which supports the underlying changes in pytest 8. 
--- .github/workflows/main.yml | 4 +- caput/tests/test_memh5.py | 60 +++++++++++++------------- caput/tests/test_memh5_parallel.py | 10 ++--- caput/tests/test_mpiarray.py | 6 +-- caput/tests/test_selection.py | 10 ++--- caput/tests/test_selection_parallel.py | 6 +-- 6 files changed, 48 insertions(+), 48 deletions(-) diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index a885960c..5545ca83 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -26,7 +26,7 @@ jobs: - name: Install pip dependencies run: | - pip install ruff pytest black mpi4py pyinstrument psutil pytest-lazy-fixture + pip install ruff pytest black mpi4py pyinstrument psutil pytest-lazy-fixtures pip install -r requirements.txt pip install -e .[compression] @@ -62,7 +62,7 @@ jobs: pip install -r requirements.txt pip install zarr==2.11.3 pip install mpi4py numcodecs>=0.7.3 bitshuffle - pip install pytest pytest-lazy-fixture + pip install pytest pytest-lazy-fixtures pip install -e . - name: Run serial tests diff --git a/caput/tests/test_memh5.py b/caput/tests/test_memh5.py index c09bb06c..abcd818b 100644 --- a/caput/tests/test_memh5.py +++ b/caput/tests/test_memh5.py @@ -10,7 +10,7 @@ import h5py import numpy as np import pytest -from pytest_lazyfixture import lazy_fixture +from pytest_lazy_fixtures import lf import zarr import copy @@ -183,8 +183,8 @@ def assertAttrsEqual(a, b): @pytest.mark.parametrize( "test_file,file_open_function", [ - (lazy_fixture("filled_h5_file"), h5py.File), - (lazy_fixture("filled_zarr_file"), zarr.open_group), + (lf("filled_h5_file"), h5py.File), + (lf("filled_zarr_file"), zarr.open_group), ], ) def test_file_sanity(test_file, file_open_function): @@ -196,12 +196,12 @@ def test_file_sanity(test_file, file_open_function): @pytest.mark.parametrize( "test_file,file_open_function,file_format", [ - (lazy_fixture("filled_h5_file"), h5py.File, None), - (lazy_fixture("filled_zarr_file"), zarr.open_group, None), - 
(lazy_fixture("filled_zarrzip_file"), zarr.open_group, None), - (lazy_fixture("filled_h5_file"), h5py.File, fileformats.HDF5), - (lazy_fixture("filled_zarr_file"), zarr.open_group, fileformats.Zarr), - (lazy_fixture("filled_zarrzip_file"), zarr.open_group, fileformats.Zarr), + (lf("filled_h5_file"), h5py.File, None), + (lf("filled_zarr_file"), zarr.open_group, None), + (lf("filled_zarrzip_file"), zarr.open_group, None), + (lf("filled_h5_file"), h5py.File, fileformats.HDF5), + (lf("filled_zarr_file"), zarr.open_group, fileformats.Zarr), + (lf("filled_zarrzip_file"), zarr.open_group, fileformats.Zarr), ], ) def test_to_from_file(test_file, file_open_function, file_format): @@ -223,8 +223,8 @@ def test_to_from_file(test_file, file_open_function, file_format): @pytest.mark.parametrize( "test_file,file_format", [ - (lazy_fixture("filled_h5_file"), fileformats.HDF5), - (lazy_fixture("filled_zarr_file"), fileformats.Zarr), + (lf("filled_h5_file"), fileformats.HDF5), + (lf("filled_zarr_file"), fileformats.Zarr), ], ) def test_memdisk(test_file, file_format): @@ -253,8 +253,8 @@ def test_memdisk(test_file, file_format): @pytest.mark.parametrize( "test_file,file_format", [ - (lazy_fixture("filled_h5_file"), fileformats.HDF5), - (lazy_fixture("filled_zarr_file"), fileformats.Zarr), + (lf("filled_h5_file"), fileformats.HDF5), + (lf("filled_zarr_file"), fileformats.Zarr), ], ) def test_compression(test_file, file_format, compression, compression_opts, chunks): @@ -302,10 +302,10 @@ class TempSubClass(memh5.MemDiskGroup): @pytest.mark.parametrize( "test_file,file_format", [ - (lazy_fixture("h5_file"), fileformats.HDF5), - (lazy_fixture("zarr_file"), fileformats.Zarr), - (lazy_fixture("h5_file"), None), - (lazy_fixture("zarr_file"), None), + (lf("h5_file"), fileformats.HDF5), + (lf("zarr_file"), fileformats.Zarr), + (lf("h5_file"), None), + (lf("zarr_file"), None), ], ) def test_io(test_file, file_format): @@ -395,9 +395,9 @@ def zarrzip_basiccont_file(zarr_basiccont_file): 
@pytest.mark.parametrize( "test_file,file_format", [ - (lazy_fixture("h5_basiccont_file"), fileformats.HDF5), - (lazy_fixture("zarr_basiccont_file"), fileformats.Zarr), - (lazy_fixture("zarrzip_basiccont_file"), fileformats.Zarr), + (lf("h5_basiccont_file"), fileformats.HDF5), + (lf("zarr_basiccont_file"), fileformats.Zarr), + (lf("zarrzip_basiccont_file"), fileformats.Zarr), ], ) def test_access(test_file, file_format): @@ -420,8 +420,8 @@ def test_access(test_file, file_format): @pytest.mark.parametrize( "test_file,file_format", [ - (lazy_fixture("h5_basiccont_file"), fileformats.HDF5), - (lazy_fixture("zarr_basiccont_file"), fileformats.Zarr), + (lf("h5_basiccont_file"), fileformats.HDF5), + (lf("zarr_basiccont_file"), fileformats.Zarr), ], ) def test_history(test_file, file_format): @@ -459,8 +459,8 @@ def test_history(test_file, file_format): @pytest.mark.parametrize( "test_file,file_format", [ - (lazy_fixture("h5_file"), fileformats.HDF5), - (lazy_fixture("zarr_file"), fileformats.Zarr), + (lf("h5_file"), fileformats.HDF5), + (lf("zarr_file"), fileformats.Zarr), ], ) def test_to_from__file_unicode(test_file, file_format): @@ -519,8 +519,8 @@ def test_to_from__file_unicode(test_file, file_format): @pytest.mark.parametrize( "test_file,file_format", [ - (lazy_fixture("h5_file"), fileformats.HDF5), - (lazy_fixture("zarr_file"), fileformats.Zarr), + (lf("h5_file"), fileformats.HDF5), + (lf("zarr_file"), fileformats.Zarr), ], ) def test_failure(test_file, file_format): @@ -537,8 +537,8 @@ def test_failure(test_file, file_format): @pytest.mark.parametrize( "test_file,file_format", [ - (lazy_fixture("h5_file"), fileformats.HDF5), - (lazy_fixture("zarr_file"), fileformats.Zarr), + (lf("h5_file"), fileformats.HDF5), + (lf("zarr_file"), fileformats.Zarr), ], ) def test_to_from_hdf5(test_file, file_format): @@ -567,8 +567,8 @@ def test_to_from_hdf5(test_file, file_format): @pytest.mark.parametrize( "test_file,file_format", [ - (lazy_fixture("h5_file"), fileformats.HDF5), 
- (lazy_fixture("zarr_file"), fileformats.Zarr), + (lf("h5_file"), fileformats.HDF5), + (lf("zarr_file"), fileformats.Zarr), ], ) def test_json_failure(test_file, file_format): diff --git a/caput/tests/test_memh5_parallel.py b/caput/tests/test_memh5_parallel.py index 5d1667a3..4c0c975f 100644 --- a/caput/tests/test_memh5_parallel.py +++ b/caput/tests/test_memh5_parallel.py @@ -1,7 +1,7 @@ """Unit tests for the parallel features of the memh5 module.""" import pytest -from pytest_lazyfixture import lazy_fixture +from pytest_lazy_fixtures import lf import numpy as np import h5py import zarr @@ -62,9 +62,9 @@ def test_create_dataset(): @pytest.mark.parametrize( "test_file,file_open_function,file_format", [ - (lazy_fixture("h5_file_distributed"), h5py.File, fileformats.HDF5), + (lf("h5_file_distributed"), h5py.File, fileformats.HDF5), ( - lazy_fixture("zarr_file_distributed"), + lf("zarr_file_distributed"), zarr.open_group, fileformats.Zarr, ), @@ -173,9 +173,9 @@ def test_io( @pytest.mark.parametrize( "test_file,file_open_function,file_format", [ - (lazy_fixture("h5_file_distributed"), h5py.File, fileformats.HDF5), + (lf("h5_file_distributed"), h5py.File, fileformats.HDF5), ( - lazy_fixture("zarr_file_distributed"), + lf("zarr_file_distributed"), zarr.open_group, fileformats.Zarr, ), diff --git a/caput/tests/test_mpiarray.py b/caput/tests/test_mpiarray.py index 7e94da2a..2b582539 100644 --- a/caput/tests/test_mpiarray.py +++ b/caput/tests/test_mpiarray.py @@ -8,7 +8,7 @@ from typing import Union from packaging import version import pytest -from pytest_lazyfixture import lazy_fixture +from pytest_lazy_fixtures import lf import h5py import numpy as np import zarr @@ -126,9 +126,9 @@ def test_wrap(): @pytest.mark.parametrize( "filename, file_open_function, file_format", [ - (lazy_fixture("h5_file_distributed"), h5py.File, fileformats.HDF5), + (lf("h5_file_distributed"), h5py.File, fileformats.HDF5), ( - lazy_fixture("zarr_file_distributed"), + lf("zarr_file_distributed"), 
zarr.open_group, fileformats.Zarr, ), diff --git a/caput/tests/test_selection.py b/caput/tests/test_selection.py index 1d45d95a..859842c2 100644 --- a/caput/tests/test_selection.py +++ b/caput/tests/test_selection.py @@ -1,7 +1,7 @@ """Serial version of the selection tests.""" import pytest -from pytest_lazyfixture import lazy_fixture +from pytest_lazy_fixtures import lf import numpy as np from caput.memh5 import MemGroup @@ -40,8 +40,8 @@ def zarr_file_select(datasets, zarr_file): @pytest.mark.parametrize( "container_on_disk, file_format", [ - (lazy_fixture("h5_file_select"), fileformats.HDF5), - (lazy_fixture("zarr_file_select"), fileformats.Zarr), + (lf("h5_file_select"), fileformats.HDF5), + (lf("zarr_file_select"), fileformats.Zarr), ], ) def test_file_select(container_on_disk, file_format): @@ -57,9 +57,9 @@ def test_file_select(container_on_disk, file_format): @pytest.mark.parametrize( "container_on_disk, file_format", [ - (lazy_fixture("h5_file_select"), fileformats.HDF5), + (lf("h5_file_select"), fileformats.HDF5), pytest.param( - lazy_fixture("zarr_file_select"), + lf("zarr_file_select"), fileformats.Zarr, marks=pytest.mark.xfail(reason="Zarr doesn't support index selections."), ), diff --git a/caput/tests/test_selection_parallel.py b/caput/tests/test_selection_parallel.py index 1b1c925f..887c4688 100644 --- a/caput/tests/test_selection_parallel.py +++ b/caput/tests/test_selection_parallel.py @@ -6,7 +6,7 @@ from mpi4py import MPI import numpy as np import pytest -from pytest_lazyfixture import lazy_fixture +from pytest_lazy_fixtures import lf from caput import mpiutil, mpiarray, fileformats from caput.memh5 import MemGroup @@ -51,8 +51,8 @@ def xfail_zarr_listsel(request): @pytest.mark.parametrize( "file_name, file_format", [ - (lazy_fixture("h5_file"), fileformats.HDF5), - (lazy_fixture("zarr_file"), fileformats.Zarr), + (lf("h5_file"), fileformats.HDF5), + (lf("zarr_file"), fileformats.Zarr), ], ) @pytest.mark.parametrize("fsel", [slice(1, 8, 2), 
slice(5, 8, 2)]) From 8e196139eb2468d85cdc78d9bca05e24a9e7590a Mon Sep 17 00:00:00 2001 From: Richard Shaw Date: Mon, 5 Feb 2024 16:53:07 -0800 Subject: [PATCH 8/8] fix: dictionary test issue found by ruff Also update the ruff config to avoid deprecated usage. --- caput/scripts/runner.py | 2 +- pyproject.toml | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/caput/scripts/runner.py b/caput/scripts/runner.py index 30a08722..16ff0983 100755 --- a/caput/scripts/runner.py +++ b/caput/scripts/runner.py @@ -484,7 +484,7 @@ def queue( if sfile != dfile: shutil.copyfile(sfile, dfile) - if "modules" in rconf and rconf["modules"]: + if rconf.get("modules"): modules = rconf["modules"] modules = (modules,) if isinstance(modules, str) else modules modstr = "module purge\nmodule load " diff --git a/pyproject.toml b/pyproject.toml index 56897760..e4b312dc 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -19,10 +19,10 @@ requires = [ # flake8-blind-except ('BLE') # flake8-comprehensions ('C4') # flake8-return ('RET') -select = ["E", "D", "F", "I", "UP", "NPY", "RUF", "BLE", "C4", "RET"] +lint.select = ["E", "D", "F", "I", "UP", "NPY", "RUF", "BLE", "C4", "RET"] # E203, W503 -ignore = [ +lint.ignore = [ "E501", # E501: line length violations. Enforce these with `black` "E741", # E741: Ambiguous variable name "D105", # D105: Missing docstring in magic method