diff --git a/.github/stubtest-allowlist b/.github/stubtest-allowlist new file mode 100644 index 000000000..a3bd3a1a4 --- /dev/null +++ b/.github/stubtest-allowlist @@ -0,0 +1,33 @@ +netCDF4.AccessModeOptions +netCDF4.CompressionLevelOptions +netCDF4.CompressionOptions +netCDF4.DatatypeOptions +netCDF4.DimensionsOptions +netCDF4.DiskFormatOptions +netCDF4.EndianOptions +netCDF4.FormatOptions +netCDF4.QuantizeOptions +netCDF4.CalendarOptions +netCDF4.ellipsis +netCDF4.DateTimeArray +netCDF4.FiltersDict +netCDF4.SzipInfo +netCDF4.BloscInfo +netCDF4.BoolInt +netCDF4.GetSetItemKey +netCDF4.T_Datatype +netCDF4.T_DatatypeNC +netCDF4.Dataset.__dealloc +netCDF4.Dimension.__reduce_cython__ +netCDF4.Dimension.__setstate_cython__ +netCDF4.Variable.auto_complex +netCDF4._netCDF4.Dataset.__dealloc +netCDF4._netCDF4.Dimension.__reduce_cython__ +netCDF4._netCDF4.Dimension.__setstate_cython__ +netCDF4._netCDF4.NC_DISKLESS +netCDF4._netCDF4.NC_PERSIST +netCDF4._netCDF4.Variable.auto_complex +netCDF4._netCDF4.__reduce_cython__ +netCDF4._netCDF4.__setstate_cython__ +netCDF4._netCDF4.__test__ +netCDF4.utils.bytes \ No newline at end of file diff --git a/.github/workflows/build_master.yml b/.github/workflows/build_master.yml index 97dea032d..f8f841636 100644 --- a/.github/workflows/build_master.yml +++ b/.github/workflows/build_master.yml @@ -47,13 +47,14 @@ jobs: - name: Install python dependencies via pip run: | python -m pip install --upgrade pip - pip install numpy cython cftime pytest twine wheel check-manifest mpi4py + pip install numpy cython cftime pytest twine wheel check-manifest mpi4py mypy types-setuptools - name: Install netcdf4-python run: | export PATH=${NETCDF_DIR}/bin:${PATH} export NETCDF_PLUGIN_DIR=${{ github.workspace }}/netcdf-c/plugins/plugindir python setup.py install + - name: Test run: | export PATH=${NETCDF_DIR}/bin:${PATH} @@ -78,3 +79,9 @@ jobs: else echo "hdf5 compressed mpi test passed!" fi + + - name: Stubtest + run: | + stubtest netCDF4 --allowlist .github/stubtest-allowlist --mypy-config-file=pyproject.toml + mypy test + mypy examples diff --git a/.gitignore b/.gitignore index a808b3552..f188f233a 100644 --- a/.gitignore +++ b/.gitignore @@ -2,8 +2,11 @@ build/ *.pyc dist/ *.egg-info/ -netCDF4/_netCDF4.c -netCDF4/*.so +__pycache__ +.mypy_cache +src/netCDF4/*.c +src/netCDF4/*.so +src/netCDF4/*.pyd include/constants.pyx include/parallel_support_imports.pxi netcdftime/_netcdftime.c diff --git a/Changelog b/Changelog index ddf5451c5..1b1157406 100644 --- a/Changelog +++ b/Changelog @@ -1,3 +1,7 @@ + version 1.7.2 (tag v1.7.2rel) +=============================== + * add static type hints (PR #1302) + version 1.7.1 (tag v1.7.1rel) =============================== * include nc_complex source code from v0.2.0 tag (instead of using submodule). 
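The new Stubtest CI step above validates the stub files against the compiled module, while `mypy test` and `mypy examples` type-check user-facing code against the stubs. A minimal sketch of what that enables (illustrative only, not part of the PR; the file name and variable names here are hypothetical):

    from netCDF4 import Dataset

    nc = Dataset("demo.nc", "w", format="NETCDF4")  # OK: matches AccessModeOptions / FormatOptions
    nc.createDimension("x", 3)
    v = nc.createVariable("v", "f4", ("x",))  # inferred as Variable[np.dtype]
    v[:] = [1.0, 2.0, 3.0]
    nc.close()

    nc_bad = Dataset("demo.nc", mode="z")  # mypy flags this: "z" is not an AccessModeOptions literal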
diff --git a/MANIFEST.in b/MANIFEST.in index 0eec8242f..3b055f8f3 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -15,6 +15,8 @@ include src/netCDF4/_netCDF4.pyx exclude src/netCDF4/_netCDF4.c include src/netCDF4/utils.py include src/netCDF4/plugins/empty.txt +include src/netCDF4/py.typed +include src/netCDF4/*.pyi include include/netCDF4.pxi include include/mpi-compat.h include include/membuf.pyx diff --git a/examples/bench_compress.py b/examples/bench_compress.py index 2b4680c55..39bffe906 100644 --- a/examples/bench_compress.py +++ b/examples/bench_compress.py @@ -13,7 +13,7 @@ ntrials = 10 sys.stdout.write('reading and writing a %s by %s by %s by %s random array ..\n'%(n1dim,n2dim,n3dim,n4dim)) sys.stdout.write('(average of %s trials)\n' % ntrials) -array = netCDF4.utils._quantize(uniform(size=(n1dim,n2dim,n3dim,n4dim)),4) +array = netCDF4.utils._quantize(uniform(size=(n1dim,n2dim,n3dim,n4dim)),4) # type: ignore def write_netcdf(filename,zlib=False,shuffle=False,complevel=6): diff --git a/examples/mpi_example.py b/examples/mpi_example.py index 0bebfe675..7966bdafe 100644 --- a/examples/mpi_example.py +++ b/examples/mpi_example.py @@ -1,36 +1,50 @@ # to run: mpirun -np 4 python mpi_example.py import sys +from typing import Literal from mpi4py import MPI import numpy as np from netCDF4 import Dataset + +format: Literal[ + 'NETCDF4', + 'NETCDF4_CLASSIC', + 'NETCDF3_CLASSIC', + 'NETCDF3_64BIT_OFFSET', + 'NETCDF3_64BIT_DATA' +] if len(sys.argv) == 2: - format = sys.argv[1] + format = sys.argv[1] # type: ignore else: format = 'NETCDF4_CLASSIC' + rank = MPI.COMM_WORLD.rank # The process ID (integer 0-3 for 4-process run) if rank == 0: - print('Creating file with format {}'.format(format)) + print('Creating file with format {}'.format(format)) nc = Dataset('parallel_test.nc', 'w', parallel=True, comm=MPI.COMM_WORLD, - info=MPI.Info(),format=format) + info=MPI.Info(), format=format) # below should work also - MPI_COMM_WORLD and MPI_INFO_NULL will be used. #nc = Dataset('parallel_test.nc', 'w', parallel=True) d = nc.createDimension('dim',4) v = nc.createVariable('var', np.int32, 'dim') v[rank] = rank + # switch to collective mode, rewrite the data. v.set_collective(True) v[rank] = rank nc.close() + # reopen the file read-only, check the data nc = Dataset('parallel_test.nc', parallel=True, comm=MPI.COMM_WORLD, - info=MPI.Info()) + info=MPI.Info()) assert rank==nc['var'][rank] nc.close() + # reopen the file in append mode, modify the data on the last rank. nc = Dataset('parallel_test.nc', 'a',parallel=True, comm=MPI.COMM_WORLD, - info=MPI.Info()) + info=MPI.Info()) if rank == 3: v[rank] = 2*rank nc.close() + # reopen the file read-only again, check the data. # leave out the comm and info kwargs to check that the defaults # (MPI_COMM_WORLD and MPI_INFO_NULL) work. diff --git a/examples/test_stringarr.py b/examples/test_stringarr.py index 758c4a749..7644cd59a 100644 --- a/examples/test_stringarr.py +++ b/examples/test_stringarr.py @@ -1,5 +1,6 @@ from netCDF4 import Dataset, stringtochar, chartostring import random, numpy +from typing import Final # test utilities for converting arrays of fixed-length strings # to arrays of characters (with an extra dimension), and vice-versa. 
@@ -16,7 +17,7 @@ FILE_NAME = 'tst_stringarr.nc' -FILE_FORMAT = 'NETCDF4_CLASSIC' +FILE_FORMAT: Final = 'NETCDF4_CLASSIC' chars = '1234567890aabcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ' nc = Dataset(FILE_NAME,'w',format=FILE_FORMAT) @@ -26,7 +27,6 @@ nc.createDimension('nchar',nchar) v = nc.createVariable('strings','S1',('n1','n2','nchar')) for nrec in range(nrecs): - data = [] data = numpy.empty((n2,),'S'+repr(nchar)) # fill data with random nchar character strings for n in range(n2): diff --git a/examples/threaded_read.py b/examples/threaded_read.py index 91676911b..229f0379a 100644 --- a/examples/threaded_read.py +++ b/examples/threaded_read.py @@ -28,7 +28,7 @@ nc.close() # Queue them up -items = queue.Queue() +items: queue.Queue = queue.Queue() for data,fname in zip(datal,fnames): items.put(fname) diff --git a/examples/tutorial.py b/examples/tutorial.py index d48fd1679..67573d8ae 100644 --- a/examples/tutorial.py +++ b/examples/tutorial.py @@ -1,3 +1,4 @@ +from typing import Literal from netCDF4 import Dataset # code from tutorial. @@ -26,21 +27,21 @@ def walktree(top): print(child) # dimensions. -level = rootgrp.createDimension('level', None) -time = rootgrp.createDimension('time', None) -lat = rootgrp.createDimension('lat', 73) -lon = rootgrp.createDimension('lon', 144) +level_dim = rootgrp.createDimension('level', None) +time_dim = rootgrp.createDimension('time', None) +lat_dim = rootgrp.createDimension('lat', 73) +lon_dim = rootgrp.createDimension('lon', 144) print(rootgrp.dimensions) -print(len(lon)) -print(lon.isunlimited()) -print(time.isunlimited()) +print(len(lon_dim)) +print(lon_dim.isunlimited()) +print(time_dim.isunlimited()) for dimobj in rootgrp.dimensions.values(): print(dimobj) -print(time) +print(time_dim) # variables. times = rootgrp.createVariable('time','f8',('time',)) @@ -68,7 +69,8 @@ def walktree(top): levels.units = 'hPa' temp.units = 'K' times.units = 'hours since 0001-01-01 00:00:00.0' -times.calendar = 'gregorian' +calendar: Literal['gregorian'] = 'gregorian' +times.calendar = calendar for name in rootgrp.ncattrs(): print('Global attr', name, '=', getattr(rootgrp,name)) @@ -111,49 +113,49 @@ def walktree(top): dates = [datetime(2001,3,1)+n*timedelta(hours=12) for n in range(temp.shape[0])] times[:] = date2num(dates,units=times.units,calendar=times.calendar) print("time values (in units {}):\n{}".format(times.units, times[:])) -dates = num2date(times[:],units=times.units,calendar=times.calendar) -print("dates corresponding to time values:\n{}".format(dates)) +dates_array = num2date(times[:],units=times.units,calendar=times.calendar) +print("dates corresponding to time values:\n{}".format(dates_array)) rootgrp.close() # create a series of netCDF files with a variable sharing # the same unlimited dimension. for nfile in range(10): - f = Dataset('mftest'+repr(nfile)+'.nc','w',format='NETCDF4_CLASSIC') - f.createDimension('x',None) - x = f.createVariable('x','i',('x',)) - x[0:10] = np.arange(nfile*10,10*(nfile+1)) - f.close() + nc = Dataset('mftest'+repr(nfile)+'.nc','w',format='NETCDF4_CLASSIC') + nc.createDimension('x',None) + x_var = nc.createVariable('x','i',('x',)) + x_var[0:10] = np.arange(nfile*10,10*(nfile+1)) + nc.close() # now read all those files in at once, in one Dataset. from netCDF4 import MFDataset -f = MFDataset('mftest*nc') -print(f.variables['x'][:]) +nc = MFDataset('mftest*nc') +print(nc.variables['x'][:]) # example showing how to save numpy complex arrays using compound types. 
-f = Dataset('complex.nc','w') +nc = Dataset('complex.nc','w') size = 3 # length of 1-d complex array # create sample complex data. datac = np.exp(1j*(1.+np.linspace(0, np.pi, size))) print(datac.dtype) # create complex128 compound data type. complex128 = np.dtype([('real',np.float64),('imag',np.float64)]) -complex128_t = f.createCompoundType(complex128,'complex128') +complex128_t = nc.createCompoundType(complex128,'complex128') # create a variable with this data type, write some data to it. -f.createDimension('x_dim',None) -v = f.createVariable('cmplx_var',complex128_t,'x_dim') +nc.createDimension('x_dim',None) +var_complex = nc.createVariable('cmplx_var',complex128_t,'x_dim') data = np.empty(size,complex128) # numpy structured array data['real'] = datac.real; data['imag'] = datac.imag -v[:] = data +var_complex[:] = data # close and reopen the file, check the contents. -f.close() -f = Dataset('complex.nc') -print(f) -print(f.variables['cmplx_var']) -print(f.cmptypes) -print(f.cmptypes['complex128']) -v = f.variables['cmplx_var'] -print(v.shape) -datain = v[:] # read in all the data into a numpy structured array +nc.close() +nc = Dataset('complex.nc') +print(nc) +print(nc.variables['cmplx_var']) +print(nc.cmptypes) +print(nc.cmptypes['complex128']) +var_complex = nc.variables['cmplx_var'] +print(var_complex.shape) +datain = var_complex[:] # read in all the data into a numpy structured array # create an empty numpy complex array datac2 = np.empty(datain.shape,np.complex128) # .. fill it with contents of structured array. @@ -163,9 +165,9 @@ def walktree(top): print(datac2.dtype,datac2) # more complex compound type example. -f = Dataset('compound_example.nc','w') # create a new dataset. +nc = Dataset('compound_example.nc','w') # create a new dataset. # create an unlimited dimension call 'station' -f.createDimension('station',None) +nc.createDimension('station',None) # define a compound data type (can contain arrays, or nested compound types). winddtype = np.dtype([('speed','f4'),('direction','i4')]) statdtype = np.dtype([('latitude', 'f4'), ('longitude', 'f4'), @@ -176,9 +178,9 @@ def walktree(top): # called using the createCompoundType Dataset method. # create a compound type for vector wind which will be nested inside # the station data type. This must be done first! -wind_data_t = f.createCompoundType(winddtype,'wind_data') +wind_data_t = nc.createCompoundType(winddtype,'wind_data') # now that wind_data_t is defined, create the station data type. -station_data_t = f.createCompoundType(statdtype,'station_data') +station_data_t = nc.createCompoundType(statdtype,'station_data') # create nested compound data types to hold the units variable attribute. winddtype_units = np.dtype([('speed','S12'),('direction','S12')]) statdtype_units = np.dtype([('latitude', 'S12'), ('longitude', 'S12'), @@ -188,11 +190,11 @@ def walktree(top): ('press_sounding','S12')]) # create the wind_data_units type first, since it will nested inside # the station_data_units data type. 
-wind_data_units_t = f.createCompoundType(winddtype_units,'wind_data_units') +wind_data_units_t = nc.createCompoundType(winddtype_units,'wind_data_units') station_data_units_t =\ -f.createCompoundType(statdtype_units,'station_data_units') +nc.createCompoundType(statdtype_units,'station_data_units') # create a variable of of type 'station_data_t' -statdat = f.createVariable('station_obs', station_data_t, ('station',)) +statdat = nc.createVariable('station_obs', station_data_t, ('station',)) # create a numpy structured array, assign data to it. data = np.empty(1,statdtype) data['latitude'] = 40. @@ -209,7 +211,7 @@ def walktree(top): statdat[1] = np.array((40.78,-73.99,(-12.5,90), (290.2,282.5,279.,277.9,276.,266.,264.1,260.,255.5,243.), range(900,400,-50),'New York, NY'),data.dtype) -print(f.cmptypes) +print(nc.cmptypes) windunits = np.empty(1,winddtype_units) stationobs_units = np.empty(1,statdtype_units) windunits['speed'] = 'm/s' @@ -223,21 +225,21 @@ def walktree(top): print(stationobs_units.dtype) statdat.units = stationobs_units # close and reopen the file. -f.close() -f = Dataset('compound_example.nc') -print(f) -statdat = f.variables['station_obs'] +nc.close() +nc = Dataset('compound_example.nc') +print(nc) +statdat = nc.variables['station_obs'] print(statdat) # print out data in variable. print('data in a variable of compound type:') print(statdat[:]) -f.close() +nc.close() -f = Dataset('tst_vlen.nc','w') -vlen_t = f.createVLType(np.int32, 'phony_vlen') -x = f.createDimension('x',3) -y = f.createDimension('y',4) -vlvar = f.createVariable('phony_vlen_var', vlen_t, ('y','x')) +nc = Dataset('tst_vlen.nc','w') +vlen_t = nc.createVLType(np.int32, 'phony_vlen') +x = nc.createDimension('x',3) +y = nc.createDimension('y',4) +vlvar = nc.createVariable('phony_vlen_var', vlen_t, ('y','x')) import random data = np.empty(len(y)*len(x),object) for n in range(len(y)*len(x)): @@ -246,11 +248,11 @@ def walktree(top): vlvar[:] = data print(vlvar) print('vlen variable =\n',vlvar[:]) -print(f) -print(f.variables['phony_vlen_var']) -print(f.vltypes['phony_vlen']) -z = f.createDimension('z', 10) -strvar = f.createVariable('strvar',str,'z') +print(nc) +print(nc.variables['phony_vlen_var']) +print(nc.vltypes['phony_vlen']) +z = nc.createDimension('z', 10) +strvar = nc.createVariable('strvar',str,'z') chars = '1234567890aabcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ' data = np.empty(10,object) for n in range(10): @@ -258,48 +260,48 @@ def walktree(top): data[n] = ''.join([random.choice(chars) for i in range(stringlen)]) strvar[:] = data print('variable-length string variable:\n',strvar[:]) -print(f) -print(f.variables['strvar']) -f.close() +print(nc) +print(nc.variables['strvar']) +nc.close() # Enum type example. -f = Dataset('clouds.nc','w') +nc = Dataset('clouds.nc','w') # python dict describing the allowed values and their names. enum_dict = {'Altocumulus': 7, 'Missing': 255, 'Stratus': 2, 'Clear': 0, 'Nimbostratus': 6, 'Cumulus': 4, 'Altostratus': 5, 'Cumulonimbus': 1, 'Stratocumulus': 3} # create the Enum type called 'cloud_t'. -cloud_type = f.createEnumType(np.uint8,'cloud_t',enum_dict) +cloud_type = nc.createEnumType(np.uint8,'cloud_t',enum_dict) print(cloud_type) -time = f.createDimension('time',None) +time_dim = nc.createDimension('time',None) # create a 1d variable of type 'cloud_type' called 'primary_clouds'. # The fill_value is set to the 'Missing' named value. 
-cloud_var = f.createVariable('primary_cloud',cloud_type,'time',\ +cloud_var = nc.createVariable('primary_cloud',cloud_type,'time',\ fill_value=enum_dict['Missing']) # write some data to the variable. cloud_var[:] = [enum_dict['Clear'],enum_dict['Stratus'],enum_dict['Cumulus'],\ enum_dict['Missing'],enum_dict['Cumulonimbus']] # close file, reopen it. -f.close() -f = Dataset('clouds.nc') -cloud_var = f.variables['primary_cloud'] +nc.close() +nc = Dataset('clouds.nc') +cloud_var = nc.variables['primary_cloud'] print(cloud_var) print(cloud_var.datatype.enum_dict) print(cloud_var[:]) -f.close() +nc.close() # dealing with strings from netCDF4 import stringtochar nc = Dataset('stringtest.nc','w',format='NETCDF4_CLASSIC') nc.createDimension('nchars',3) nc.createDimension('nstrings',None) -v = nc.createVariable('strings','S1',('nstrings','nchars')) +var = nc.createVariable('strings','S1',('nstrings','nchars')) datain = np.array(['foo','bar'],dtype='S3') -v[:] = stringtochar(datain) # manual conversion to char array -print(v[:]) # data returned as char array -v._Encoding = 'ascii' # this enables automatic conversion -v[:] = datain # conversion to char array done internally -print(v[:]) # data returned in numpy string array +var[:] = stringtochar(datain) # manual conversion to char array +print(var[:]) # data returned as char array +var._Encoding = 'ascii' # this enables automatic conversion +var[:] = datain # conversion to char array done internally +print(var[:]) # data returned in numpy string array nc.close() # strings in compound types nc = Dataset('compoundstring_example.nc','w') @@ -349,8 +351,8 @@ def walktree(top): nc_buf = nc.close() # close returns memoryview print(type(nc_buf)) # save nc_buf to disk, read it back in and check. -f = open('inmemory.nc', 'wb') -f.write(nc_buf); f.close() +f2 = open('inmemory.nc', 'wb') +f2.write(nc_buf); f2.close() nc = Dataset('inmemory.nc') print(nc) print(nc['v'][:]) diff --git a/pyproject.toml b/pyproject.toml index 3ec2da2de..8c8c8a327 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -73,7 +73,19 @@ where = ["src"] [tool.setuptools.package-data] "netCDF4.plugins" = ["lib__nc*"] +[tool.setuptools_scm] + [tool.pytest.ini_options] pythonpath = ["test"] -[tool.setuptools_scm] +[tool.mypy] +files = ["src/netCDF4"] + +[[tool.mypy.overrides]] +ignore_missing_imports = true +module = [ + "cftime.*", + "cython.*", + "filter_availability", + "matplotlib.*" +] diff --git a/src/netCDF4/__init__.py b/src/netCDF4/__init__.py index ac93047a2..25f6fad2e 100644 --- a/src/netCDF4/__init__.py +++ b/src/netCDF4/__init__.py @@ -10,13 +10,15 @@ __has_parallel4_support__, __has_pnetcdf_support__, __has_quantization_support__, __has_zstandard_support__, __has_bzip2_support__, __has_blosc_support__, __has_szip_support__, - __has_set_alignment__) + __has_set_alignment__, __has_parallel_support__, __has_ncfilter__) import os -__all__ =\ -['Dataset','Variable','Dimension','Group','MFDataset','MFTime','CompoundType','VLType','date2num','num2date','date2index','stringtochar','chartostring','stringtoarr','getlibversion','EnumType','get_chunk_cache','set_chunk_cache','set_alignment','get_alignment'] -__pdoc__ = { - 'utils': False, -} +__all__ = [ + 'Dataset', 'Variable', 'Dimension', 'Group', 'MFDataset', 'MFTime', 'CompoundType', + 'VLType', 'date2num', 'num2date', 'date2index', 'stringtochar', 'chartostring', + 'stringtoarr', 'getlibversion', 'EnumType', 'get_chunk_cache', 'set_chunk_cache', + 'set_alignment', 'get_alignment' +] +__pdoc__ = {'utils': False} # if HDF5_PLUGIN_PATH 
not set, point to package path if plugins live there pluginpath = os.path.join(__path__[0],'plugins') if 'HDF5_PLUGIN_PATH' not in os.environ and\ diff --git a/src/netCDF4/__init__.pyi b/src/netCDF4/__init__.pyi new file mode 100644 index 000000000..413336f0e --- /dev/null +++ b/src/netCDF4/__init__.pyi @@ -0,0 +1,636 @@ + +import os +import sys +import datetime as dt +from typing import (Any, Callable, Final, Generic, Iterable, Literal, Mapping, + NoReturn, Self, Sequence, TYPE_CHECKING, TypeAlias, TypedDict, TypeVar, Union, overload) +from typing_extensions import Buffer + +import cftime +import numpy as np +import numpy.typing as npt + +__all__ = [ + 'Dataset', 'Variable', 'Dimension', 'Group', 'MFDataset', 'MFTime', 'CompoundType', + 'VLType', 'date2num', 'num2date', 'date2index', 'stringtochar', 'chartostring', + 'stringtoarr', 'getlibversion', 'EnumType', 'get_chunk_cache', 'set_chunk_cache', + 'set_alignment', 'get_alignment' +] +__pdoc__ = {'utils': False} + + +if sys.version_info >= (3, 10): + from types import EllipsisType + + ellipsis = EllipsisType +elif not TYPE_CHECKING: + ellipsis = type(Ellipsis) # keeps ruff happy until ruff uses typeshed + + +_DatatypeStrOptions: TypeAlias = Literal[ + 'S1', 'c', 'i1', 'b', 'B', 'u1', 'i2', 'h', 's', 'u2', 'i4', + 'i', 'l', 'u4', 'i8', 'u8', 'f4', 'f', 'f8', 'd', 'c8', 'c16' +] +_DatatypeNCOptions: TypeAlias = Union[CompoundType, VLType, EnumType] +DatatypeOptions: TypeAlias = Union[_DatatypeStrOptions, _DatatypeNCOptions, npt.DTypeLike] +T_Datatype = TypeVar("T_Datatype", bound=DatatypeOptions) +T_DatatypeNC = TypeVar("T_DatatypeNC", CompoundType, VLType, EnumType) + +DimensionsOptions: TypeAlias = Union[str, bytes, Dimension, Iterable[Union[str, bytes, Dimension]]] +CompressionOptions: TypeAlias = Literal[ + 'zlib', 'szip', 'zstd', 'blosc_lz', 'blosc_lz4', + 'blosc_lz4hc', 'blosc_zlib', 'blosc_zstd' +] +CompressionLevelOptions: TypeAlias = Literal[0, 1, 2, 3, 4, 5, 6, 7, 8, 9] +AccessModeOptions: TypeAlias = Literal['r', 'w', 'r+', 'a', 'x', 'rs', 'ws', 'r+s', 'as'] +FormatOptions: TypeAlias = Literal[ + 'NETCDF4', 'NETCDF4_CLASSIC', 'NETCDF3_CLASSIC', + 'NETCDF3_64BIT_OFFSET', 'NETCDF3_64BIT_DATA' +] +DiskFormatOptions: TypeAlias = Literal['NETCDF3', 'HDF5', 'HDF4', 'PNETCDF', 'DAP2', 'DAP4', 'UNDEFINED'] +QuantizeOptions: TypeAlias = Literal['BitGroom', 'BitRound', 'GranularBitRound'] +EndianOptions: TypeAlias = Literal['native', 'little', 'big'] +CalendarOptions: TypeAlias = Literal[ + 'standard', 'gregorian', 'proleptic_gregorian', 'noleap', + '365_day', '360_day', 'julian', 'all_leap', '366_day' +] +BoolInt: TypeAlias = Literal[0, 1] + +DateTimeArray: TypeAlias = npt.NDArray[np.object_] +"""numpy array of datetime.datetime or cftime.datetime""" + +GetSetItemKey: TypeAlias = ( + int + | slice + | ellipsis + | list[int | bool] + | npt.NDArray[np.integer | np.bool_] + | tuple[int | slice | ellipsis | Sequence[int | bool] | npt.NDArray[np.integer | np.bool_], ...]
+) + +__version__: str +__netcdf4libversion__: str +__hdf5libversion__: str +__has_rename_grp__: BoolInt +__has_nc_inq_path__: BoolInt +__has_nc_inq_format_extended__: BoolInt +__has_nc_open_mem__: BoolInt +__has_nc_create_mem__: BoolInt +__has_cdf5_format__: BoolInt +__has_parallel4_support__: BoolInt +__has_pnetcdf_support__: BoolInt +__has_parallel_support__: BoolInt +__has_quantization_support__: BoolInt +__has_zstandard_support__: BoolInt +__has_bzip2_support__: BoolInt +__has_blosc_support__: BoolInt +__has_szip_support__: BoolInt +__has_set_alignment__: BoolInt +__has_ncfilter__: BoolInt +is_native_little: bool +is_native_big: bool +default_encoding: Final = 'utf-8' +unicode_error: Final = 'replace' +default_fillvals: dict[str, int | float | str] + + +# date2index, date2num, and num2date are actually provided by cftime and if stubs for +# cftime are completed these should be removed. +def date2index( + dates: dt.datetime | cftime.datetime | Sequence[dt.datetime | cftime.datetime] | DateTimeArray, + nctime: Variable, + calendar: CalendarOptions | None = None, + select: Literal["exact", "before", "after", "nearest"] = "exact", + has_year_zero: bool | None = None, +) -> int | npt.NDArray[np.int_]: ... +def date2num( + dates: dt.datetime | cftime.datetime | Sequence[dt.datetime | cftime.datetime] | DateTimeArray, + units: str, + calendar: CalendarOptions | None = None, + has_year_zero: bool | None = None, + longdouble: bool = False, +) -> np.number | npt.NDArray[np.number]: ... +def num2date( + times: Sequence[int | float | np.number] | npt.NDArray[np.number], + units: str, + calendar: CalendarOptions = "standard", + only_use_cftime_datetimes: bool = True, + only_use_python_datetimes: bool = False, + has_year_zero: bool | None = None, +) -> dt.datetime | DateTimeArray: ... + + +class BloscInfo(TypedDict): + compressor: Literal["blosc_lz", "blosc_lz4", "blosc_lz4hc", "blosc_zlib", "blosc_zstd"] + shuffle: Literal[0, 1, 2] + + +class SzipInfo(TypedDict): + coding: Literal["nn", "ec"] + pixels_per_block: Literal[4, 8, 16, 32] + + +class FiltersDict(TypedDict): + """Dict returned from netCDF4.Variable.filters()""" + zlib: bool + szip: Literal[False] | SzipInfo + zstd: bool + bzip2: bool + blosc: Literal[False] | BloscInfo + shuffle: bool + complevel: int + fletcher32: bool + + +class NetCDF4MissingFeatureException(Exception): + def __init__(self, feature: str, version: str): ... + + +class Dataset: + def __init__( + self, + filename: str | os.PathLike, + mode: AccessModeOptions = 'r', + clobber: bool = True, + format: FormatOptions = 'NETCDF4', + diskless: bool = False, + persist: bool = False, + keepweakref: bool = False, + memory: Buffer | int | None = None, + encoding: str | None = None, + parallel: bool = False, + comm: Any = None, + info: Any = None, + auto_complex: bool = False, + **kwargs: Any + ): ... + + @property + def name(self) -> str: ... + @property + def groups(self) -> dict[str, Group]: ... + @property + def dimensions(self) -> dict[str, Dimension]: ... + @property + def variables(self) -> dict[str, Variable[Any]]: ... + @property + def cmptypes(self) -> dict[str, CompoundType]: ... + @property + def vltypes(self) -> dict[str, VLType]: ... + @property + def enumtypes(self) -> dict[str, EnumType]: ... + @property + def data_model(self) -> FormatOptions: ... + @property + def file_format(self) -> FormatOptions: ... + @property + def disk_format(self) -> DiskFormatOptions: ... + @property + def parent(self) -> Dataset | None: ... + @property + def path(self) -> str: ... 
+ @property + def keepweakref(self) -> bool: ... + @property + def auto_complex(self) -> bool: ... + @property + def _ncstring_attrs__(self) -> bool: ... + @property + def __orthogonal_indexing__(self) -> bool: ... + + def filepath(self, encoding: str | None = None) -> str: ... + def isopen(self) -> bool: ... + def close(self) -> memoryview: ... # returns a memoryview only when writing an in-memory Dataset (memory was supplied); otherwise returns None, which callers typically ignore + def sync(self) -> None: ... + def set_fill_on(self) -> None: ... + def set_fill_off(self) -> None: ... + + def createDimension(self, dimname: str, size: int | None = None) -> Dimension: ... + def renameDimension(self, oldname: str, newname: str) -> None: ... + @overload + def createVariable( # type: ignore + self, + varname: str, + datatype: T_DatatypeNC, + dimensions: DimensionsOptions = (), + compression: CompressionOptions | None = None, + zlib: bool = False, + complevel: CompressionLevelOptions | None = 4, + shuffle: bool = True, + szip_coding: Literal['nn', 'ec'] = 'nn', + szip_pixels_per_block: Literal[4, 8, 16, 32] = 8, + blosc_shuffle: Literal[0, 1, 2] = 1, + fletcher32: bool = False, + contiguous: bool = False, + chunksizes: Sequence[int] | None = None, + endian: EndianOptions = 'native', + least_significant_digit: int | None = None, + significant_digits: int | None = None, + quantize_mode: QuantizeOptions = 'BitGroom', + fill_value: int | float | str | bytes | Literal[False] | None = None, + chunk_cache: int | None = None + ) -> Variable[T_DatatypeNC]: ... + @overload + def createVariable( + self, + varname: str, + datatype: _DatatypeStrOptions | npt.DTypeLike, + dimensions: DimensionsOptions = (), + compression: CompressionOptions | None = None, + zlib: bool = False, + complevel: CompressionLevelOptions | None = 4, + shuffle: bool = True, + szip_coding: Literal['nn', 'ec'] = 'nn', + szip_pixels_per_block: Literal[4, 8, 16, 32] = 8, + blosc_shuffle: Literal[0, 1, 2] = 1, + fletcher32: bool = False, + contiguous: bool = False, + chunksizes: Sequence[int] | None = None, + endian: EndianOptions = 'native', + least_significant_digit: int | None = None, + significant_digits: int | None = None, + quantize_mode: QuantizeOptions = 'BitGroom', + fill_value: int | float | str | bytes | Literal[False] | None = None, + chunk_cache: int | None = None + ) -> Variable[np.dtype]: ... + def renameVariable(self, oldname: str, newname: str) -> None: ... + def createGroup(self, groupname: str) -> Group: ... + + def renameGroup(self, oldname: str, newname: str) -> None: ... + def renameAttribute(self, oldname: str, newname: str) -> None: ... + def createCompoundType( + self, datatype: npt.DTypeLike | Sequence[tuple[str, npt.DTypeLike]], datatype_name: str + ) -> CompoundType: ... + def createVLType(self, datatype: npt.DTypeLike, datatype_name: str) -> VLType: ... + def createEnumType( + self, datatype: np.dtype[np.integer] | type[np.integer] | type[int], datatype_name: str, enum_dict: dict[str, int] + ) -> EnumType: ... + + def ncattrs(self) -> list[str]: ... + def setncattr_string(self, name: str, value: Any) -> None: ... + def setncattr(self, name: str, value: Any) -> None: ... + def setncatts(self, attdict: Mapping[str, Any]) -> None: ... + def getncattr(self, name: str, encoding: str = 'utf-8') -> Any: ... + def delncattr(self, name: str) -> None: ... + def set_auto_chartostring(self, value: bool) -> None: ... + def set_auto_maskandscale(self, value: bool) -> None: ... + def set_auto_mask(self, value: bool) -> None: ... + def set_auto_scale(self, value: bool) -> None: ...
+ def set_always_mask(self, value: bool) -> None: ... + def set_ncstring_attrs(self, value: bool) -> None: ... + def get_variables_by_attributes(self, **kwargs: Callable[[Any], bool] | Any) -> list[Variable]: ... + + @staticmethod + def fromcdl( + cdlfilename: str, + ncfilename: str | None = None, + mode: AccessModeOptions = 'a', + format: FormatOptions = 'NETCDF4' + ) -> Dataset: ... + @overload + def tocdl( + self, + coordvars: bool = False, + data: bool = False, + outfile: None = None + ) -> str: ... + @overload + def tocdl( + self, + coordvars: bool = False, + data: bool = False, + *, + outfile: str | os.PathLike + ) -> None: ... + + def has_blosc_filter(self) -> bool: ... + def has_zstd_filter(self) -> bool: ... + def has_bzip2_filter(self) -> bool: ... + def has_szip_filter(self) -> bool: ... + + def __getitem__(self, elem: str) -> Any: ... # should be Group | Variable, but this causes too many problems + def __setattr__(self, name: str, value: Any) -> None: ... + def __getattr__(self, name: str) -> Any: ... + def __delattr__(self, name: str): ... + def __dealloc(self) -> None: ... + def __reduce__(self) -> NoReturn: ... + def __enter__(self) -> Self: ... + def __exit__(self, atype, value, traceback) -> None: ... + + +class Group(Dataset): + def __init__(self, parent: Dataset, name: str, **kwargs: Any) -> None: ... + + def close(self) -> NoReturn: ... + + +class Dimension: + def __init__(self, grp: Dataset, name: str, size: int | None = None, **kwargs: Any) -> None: ... + + @property + def name(self) -> str: ... + @property + def size(self) -> int: ... + + def group(self) -> Dataset: ... + def isunlimited(self) -> bool: ... + + def __len__(self) -> int: ... + + +class Variable(Generic[T_Datatype]): + + @overload + def __new__( # type: ignore + self, + grp: Dataset, + name: str, + datatype: T_DatatypeNC, + dimensions: DimensionsOptions = (), + compression: CompressionOptions | None = None, + zlib: bool = False, + complevel: CompressionLevelOptions | None = 4, + shuffle: bool = True, + szip_coding: Literal['nn', 'ec'] = 'nn', + szip_pixels_per_block: Literal[4, 8, 16, 32] = 8, + blosc_shuffle: Literal[0, 1, 2] = 1, + fletcher32: bool = False, + contiguous: bool = False, + chunksizes: Sequence[int] | None = None, + endian: EndianOptions = 'native', + least_significant_digit: int | None = None, + significant_digits: int | None = None, + quantize_mode: QuantizeOptions = 'BitGroom', + fill_value: int | float | str | bytes | Literal[False] | None = None, + chunk_cache: int | None = None, + **kwargs: Any + ) -> Variable[T_DatatypeNC]: ... + + @overload + def __new__( + self, + grp: Dataset, + name: str, + datatype: _DatatypeStrOptions | npt.DTypeLike, + dimensions: DimensionsOptions = (), + compression: CompressionOptions | None = None, + zlib: bool = False, + complevel: CompressionLevelOptions | None = 4, + shuffle: bool = True, + szip_coding: Literal['nn', 'ec'] = 'nn', + szip_pixels_per_block: Literal[4, 8, 16, 32] = 8, + blosc_shuffle: Literal[0, 1, 2] = 1, + fletcher32: bool = False, + contiguous: bool = False, + chunksizes: Sequence[int] | None = None, + endian: EndianOptions = 'native', + least_significant_digit: int | None = None, + significant_digits: int | None = None, + quantize_mode: QuantizeOptions = 'BitGroom', + fill_value: int | float | str | bytes | Literal[False] | None = None, + chunk_cache: int | None = None, + **kwargs: Any + ) -> Variable[np.dtype]: ... 
+ + + def __init__( + self, + grp: Dataset, + name: str, + datatype: T_Datatype, + dimensions: DimensionsOptions = (), + compression: CompressionOptions | None = None, + zlib: bool = False, + complevel: CompressionLevelOptions | None = 4, + shuffle: bool = True, + szip_coding: Literal['nn', 'ec'] = 'nn', + szip_pixels_per_block: Literal[4, 8, 16, 32] = 8, + blosc_shuffle: Literal[0, 1, 2] = 1, + fletcher32: bool = False, + contiguous: bool = False, + chunksizes: Sequence[int] | None = None, + endian: EndianOptions = 'native', + least_significant_digit: int | None = None, + significant_digits: int | None = None, + quantize_mode: QuantizeOptions = 'BitGroom', + fill_value: int | float | str | bytes | Literal[False] | None = None, + chunk_cache: int | None = None, + **kwargs: Any + ) -> None: ... + + @property + def name(self) -> str: ... + @property + def dtype(self) -> np.dtype | type[str]: ... + @property + def datatype(self) -> T_Datatype: ... + @property + def shape(self) -> tuple[int, ...]: ... + @property + def size(self) -> int: ... + @property + def dimensions(self) -> tuple[str, ...]: ... + @property + def ndim(self) -> int: ... + @property + def scale(self) -> bool: ... + @property + def mask(self) -> bool: ... + @property + def chartostring(self) -> bool: ... + @property + def always_mask(self) -> bool: ... + @property + def __orthogonal_indexing__(self) -> bool: ... + + def group(self) -> Dataset: ... + def ncattrs(self) -> list[str]: ... + def setncattr(self, name: str, value: Any) -> None: ... + def setncattr_string(self, name: str, value: Any) -> None: ... + def setncatts(self, attdict: Mapping[str, Any]) -> None: ... + def getncattr(self, name: str, encoding: str = 'utf-8') -> Any: ... + def delncattr(self, name: str) -> None: ... + def filters(self) -> FiltersDict: ... + def quantization(self) -> tuple[int, QuantizeOptions] | None: ... + def endian(self) -> EndianOptions: ... + def chunking(self) -> Literal['contiguous'] | list[int]: ... + def get_var_chunk_cache(self) -> tuple[int, int, float]: ... + def set_var_chunk_cache( + self, + size: int | None = None, + nelems: int | None = None, + preemption: float | None = None + ) -> None: ... + def renameAttribute(self, oldname: str, newname: str) -> None: ... + def assignValue(self, val: Any) -> None: ... + def getValue(self) -> Any: ... + def set_auto_chartostring(self, chartostring: bool) -> None: ... + def use_nc_get_vars(self, use_nc_get_vars: bool) -> None: ... + def set_auto_maskandscale(self, maskandscale: bool) -> None: ... + def set_auto_scale(self, scale: bool) -> None: ... + def set_auto_mask(self, mask: bool) -> None: ... + def set_always_mask(self, always_mask: bool) -> None: ... + def set_ncstring_attrs(self, ncstring_attrs: bool) -> None: ... + def set_collective(self, value: bool) -> None: ... + def get_dims(self) -> tuple[Dimension, ...]: ... + + def __delattr__(self, name: str) -> None: ... + def __setattr__(self, name: str, value: Any) -> None: ... + def __getattr__(self, name: str) -> Any: ... + def __getitem__(self, elem: GetSetItemKey) -> np.ndarray: ... + def __setitem__(self, elem: GetSetItemKey, data: npt.ArrayLike) -> None: ... + def __array__(self) -> np.ndarray: ... + def __len__(self) -> int: ... + + +class CompoundType: + dtype: np.dtype + dtype_view: np.dtype + name: str + + def __init__( + self, grp: Dataset, dt: npt.DTypeLike | Sequence[tuple[str, npt.DTypeLike]], dtype_name: str, **kwargs: Any + ) -> None: ... + + def __reduce__(self) -> NoReturn: ...
+ + +class VLType: + dtype: np.dtype + name: str | None + + def __init__(self, grp: Dataset, dt: npt.DTypeLike, dtype_name: str, **kwargs: Any) -> None: ... + + def __reduce__(self) -> NoReturn: ... + + +class EnumType: + dtype: np.dtype[np.integer] + name: str + enum_dict: Mapping[str, int] + + def __init__( + self, + grp: Dataset, + dt: np.dtype[np.integer] | type[np.integer] | type[int] | str, + dtype_name: str, + enum_dict: Mapping[str, int], + **kwargs: Any + ) -> None: ... + + def __reduce__(self) -> NoReturn: ... + + +class MFDataset(Dataset): + def __init__( + self, + files: str | Sequence[str | os.PathLike], + check: bool = False, + aggdim: str | None = None, + exclude: Sequence[str] = [], + master_file: str | os.PathLike | None = None + ) -> None: ... + + @property + def dimensions(self) -> dict[str, Dimension]: ... # this should be: dict[str, Dimension | _Dimension] + @property + def variables(self) -> dict[str, Variable[Any]]: ... # this should be: dict[str, Variable[Any] | _Variable] + + +class _Dimension: + dimlens: list[int] + dimtotlen: int + + def __init__( + self, dimname: str, dim: Dimension, dimlens: list[int], dimtotlen: int + ) -> None: ... + + def __len__(self) -> int: ... + def isunlimited(self) -> Literal[True]: ... + + +class _Variable: + dimensions: tuple[str, ...] + dtype: np.dtype | type[str] + + def __init__(self, dset: Dataset, varname: str, var: Variable[Any], recdimname: str) -> None: ... + + # shape, ndim, and name actually come from __getattr__ + @property + def shape(self) -> tuple[int, ...]: ... + @property + def ndim(self) -> int: ... + @property + def name(self) -> str: ... + + def typecode(self) -> np.dtype | type[str]: ... + def ncattrs(self) -> list[str]: ... + def _shape(self) -> tuple[int, ...]: ... + def set_auto_chartostring(self, val: bool) -> None: ... + def set_auto_maskandscale(self, val: bool) -> None: ... + def set_auto_mask(self, val: bool) -> None: ... + def set_auto_scale(self, val: bool) -> None: ... + def set_always_mask(self, val: bool) -> None: ... + + def __getattr__(self, name: str) -> Any: ... + def __getitem__(self, elem: GetSetItemKey) -> Any: ... + def __len__(self) -> int: ... + + +class MFTime(_Variable): + calendar: CalendarOptions | None + units: str | None + + def __init__( + self, + time: Variable, + units: str | None = None, + calendar: CalendarOptions | None = None + ): ... + def __getitem__(self, elem: GetSetItemKey) -> np.ndarray: ... + + +@overload +def stringtoarr( + string: str, + NUMCHARS: int, + dtype: Literal["S"] | np.dtype[np.bytes_] = "S", +) -> npt.NDArray[np.bytes_]: ... +@overload +def stringtoarr( + string: str, + NUMCHARS: int, + dtype: Literal["U"] | np.dtype[np.str_], +) -> npt.NDArray[np.str_]: ... +@overload +def stringtochar( + a: npt.NDArray[np.character], + encoding: Literal["none", "None", "bytes"], +) -> npt.NDArray[np.bytes_]: ... +@overload +def stringtochar( + a: npt.NDArray[np.character], + encoding: str = ..., +) -> npt.NDArray[np.str_] | npt.NDArray[np.bytes_]: ... +@overload +def chartostring( + b: npt.NDArray[np.character], + encoding: Literal["none", "None", "bytes"] = ..., +) -> npt.NDArray[np.bytes_]: ... +@overload +def chartostring( + b: npt.NDArray[np.character], + encoding: str = ..., +) -> npt.NDArray[np.str_] | npt.NDArray[np.bytes_]: ... + +def getlibversion() -> str: ... + +def set_alignment(threshold: int, alignment: int) -> None: ... +def get_alignment() -> tuple[int, int]: ...
+ +def set_chunk_cache( + size: int | None = None, + nelems: int | None = None, + preemption: float | None = None +) -> None: ... +def get_chunk_cache() -> tuple[int, int, float]: ... diff --git a/src/netCDF4/_netCDF4.pyi b/src/netCDF4/_netCDF4.pyi new file mode 100644 index 000000000..536d4f63b --- /dev/null +++ b/src/netCDF4/_netCDF4.pyi @@ -0,0 +1,19 @@ +# The definitions are intentionally done in __init__.pyi. +# This file only exists in case someone imports from netCDF4._netCDF4 +from . import ( + Dataset, Variable, Dimension, Group, MFDataset, MFTime, CompoundType, + VLType, date2num, num2date, date2index, stringtochar, chartostring, + stringtoarr, getlibversion, EnumType, get_chunk_cache, set_chunk_cache, + set_alignment, get_alignment, default_fillvals, default_encoding, + NetCDF4MissingFeatureException, is_native_big, is_native_little, unicode_error, + __version__, __netcdf4libversion__, __hdf5libversion__, __has_rename_grp__, + __has_nc_inq_path__, __has_nc_inq_format_extended__, __has_nc_open_mem__, + __has_nc_create_mem__, __has_cdf5_format__, __has_parallel4_support__, + __has_pnetcdf_support__, __has_parallel_support__, + __has_quantization_support__, __has_zstandard_support__, + __has_bzip2_support__, __has_blosc_support__, __has_szip_support__, + __has_set_alignment__, __has_ncfilter__ +) + + +def dtype_is_complex(dtype: str) -> bool: ... diff --git a/src/netCDF4/_netCDF4.pyx b/src/netCDF4/_netCDF4.pyx index 49f62ee5c..2d9a16628 100644 --- a/src/netCDF4/_netCDF4.pyx +++ b/src/netCDF4/_netCDF4.pyx @@ -95,8 +95,8 @@ To create a netCDF file from python, you simply call the `Dataset` constructor. This is also the method used to open an existing netCDF file. If the file is open for write access (`mode='w', 'r+'` or `'a'`), you may write any type of data including new dimensions, groups, variables and -attributes. netCDF files come in five flavors (`NETCDF3_CLASSIC, -NETCDF3_64BIT_OFFSET, NETCDF3_64BIT_DATA, NETCDF4_CLASSIC`, and `NETCDF4`). +attributes. netCDF files come in five flavors (`NETCDF3_CLASSIC`, +`NETCDF3_64BIT_OFFSET`, `NETCDF3_64BIT_DATA`, `NETCDF4_CLASSIC`, and `NETCDF4`). `NETCDF3_CLASSIC` was the original netcdf binary format, and was limited to file sizes less than 2 Gb. `NETCDF3_64BIT_OFFSET` was introduced in version 3.6.0 of the library, and extended the original binary format @@ -2113,7 +2113,7 @@ def _ensure_nc_success(ierr, err_cls=RuntimeError, filename=None, extra_msg=None raise err_cls(err_str) -def dtype_is_complex(dtype: Union[str, numpy.dtype]) -> bool: +def dtype_is_complex(dtype): """Return True if dtype is a complex number""" return dtype in ("c8", "c16") @@ -2160,13 +2160,13 @@ cdef _inq_vardimid(int ncid, int varid, bint auto_complex): # only exist at the python level (not in the netCDF file). _private_atts = \ -['_grpid','_grp','_varid','groups','dimensions','variables','dtype','data_model','disk_format', +('_grpid','_grp','_varid','groups','dimensions','variables','dtype','data_model','disk_format', '_nunlimdim','path','parent','ndim','mask','scale','cmptypes','vltypes','enumtypes','_isprimitive', 'file_format','_isvlen','_isenum','_iscompound','_cmptype','_vltype','_enumtype','name', - '__orthogoral_indexing__','keepweakref','_has_lsd', + '__orthogoral_indexing__','keepweakref','_has_lsd','always_mask', '_buffer','chartostring','_use_get_vars','_ncstring_attrs__', 'auto_complex' -] +) cdef class Dataset: """ @@ -2556,7 +2556,7 @@ strings.
return self.__str__() def __str__(self): - ncdump = [repr(type(self))] + ncdump = [repr(type(self)).replace("._netCDF4", "")] dimnames = tuple(_tostr(dimname)+'(%s)'%len(self.dimensions[dimname])\ for dimname in self.dimensions.keys()) varnames = tuple(\ @@ -3718,12 +3718,13 @@ Read-only class variables: def __str__(self): if not dir(self._grp): return 'Dimension object no longer valid' + typ = repr(type(self)).replace("._netCDF4", "") if self.isunlimited(): return "%r (unlimited): name = '%s', size = %s" %\ - (type(self), self._name, len(self)) + (typ, self._name, len(self)) else: return "%r: name = '%s', size = %s" %\ - (type(self), self._name, len(self)) + (typ, self._name, len(self)) def __len__(self): # len(`Dimension` instance) returns current size of dimension @@ -4450,7 +4451,7 @@ behavior is similar to Fortran or Matlab, but different than numpy. cdef int ierr, no_fill if not dir(self._grp): return 'Variable object no longer valid' - ncdump = [repr(type(self))] + ncdump = [repr(type(self)).replace("._netCDF4", "")] show_more_dtype = True if self._iscompound: kind = 'compound' @@ -6099,13 +6100,13 @@ the user. CompoundType constructor. - **`group`**: `Group` instance to associate with the compound datatype. + **`grp`**: `Group` instance to associate with the compound datatype. - **`datatype`**: A numpy dtype object describing a structured (a.k.a record) + **`dt`**: A numpy dtype object describing a structured (a.k.a record) array. Can be composed of homogeneous numeric or character data types, or other structured array data types. - **`datatype_name`**: a Python string containing a description of the + **`dtype_name`**: a Python string containing a description of the compound data type. ***Note 1***: When creating nested compound data types, @@ -6149,8 +6150,9 @@ the user. return self.__str__() def __str__(self): - return "%r: name = '%s', numpy dtype = %s" %\ - (type(self), self.name, self.dtype) + typ = repr(type(self)).replace("._netCDF4", "") + return "%s: name = '%s', numpy dtype = %s" %\ + (typ, self.name, self.dtype) def __reduce__(self): # raise error is user tries to pickle a CompoundType object. @@ -6437,11 +6439,12 @@ the user. return self.__str__() def __str__(self): + typ = repr(type(self)).replace("._netCDF4", "") if self.dtype == str: - return '%r: string type' % (type(self),) + return '%r: string type' % (typ,) else: return "%r: name = '%s', numpy dtype = %s" %\ - (type(self), self.name, self.dtype) + (typ, self.name, self.dtype) def __reduce__(self): # raise error is user tries to pickle a VLType object. @@ -6549,8 +6552,9 @@ the user. return self.__str__() def __str__(self): + typ = repr(type(self)).replace("._netCDF4", "") return "%r: name = '%s', numpy dtype = %s, fields/values =%s" %\ - (type(self), self.name, self.dtype, self.enum_dict) + (typ, self.name, self.dtype, self.enum_dict) def __reduce__(self): # raise error is user tries to pickle a EnumType object. @@ -6969,7 +6973,7 @@ Example usage (See `MFDataset.__init__` for more details): return the netcdf attribute names from the master file. 
""" - return self._cdf[0].__dict__.keys() + return list(self._cdf[0].__dict__) def close(self): """ @@ -6989,7 +6993,7 @@ Example usage (See `MFDataset.__init__` for more details): return all(map(lambda dset: dset.isopen(), self._cdf)) def __repr__(self): - ncdump = [repr(type(self))] + ncdump = [repr(type(self)).replace("._netCDF4", "")] dimnames = tuple(str(dimname) for dimname in self.dimensions.keys()) varnames = tuple(str(varname) for varname in self.variables.keys()) grpnames = () @@ -7019,12 +7023,13 @@ class _Dimension: def isunlimited(self): return True def __repr__(self): + typ = repr(type(self)).replace("._netCDF4", "") if self.isunlimited(): return "%r (unlimited): name = '%s', size = %s" %\ - (type(self), self._name, len(self)) + (typ, self._name, len(self)) else: return "%r: name = '%s', size = %s" %\ - (type(self), self._name, len(self)) + (typ, self._name, len(self)) class _Variable: def __init__(self, dset, varname, var, recdimname): @@ -7043,7 +7048,7 @@ class _Variable: def typecode(self): return self.dtype def ncattrs(self): - return self._mastervar.__dict__.keys() + return list(self._mastervar.__dict__.keys()) def __getattr__(self,name): if name == 'shape': return self._shape() if name == 'ndim': return len(self._shape()) @@ -7053,7 +7058,7 @@ class _Variable: except: raise AttributeError(name) def __repr__(self): - ncdump = [repr(type(self))] + ncdump = [repr(type(self)).replace("._netCDF4", "")] dimnames = tuple(str(dimname) for dimname in self.dimensions) ncdump.append('%s %s%s' % (self.dtype, self._name, dimnames)) for name in self.ncattrs(): diff --git a/src/netCDF4/py.typed b/src/netCDF4/py.typed new file mode 100644 index 000000000..e69de29bb