diff --git a/pyglider/__init__.py b/pyglider/__init__.py
index 40a96af..e69de29 100644
--- a/pyglider/__init__.py
+++ b/pyglider/__init__.py
@@ -1 +0,0 @@
-# -*- coding: utf-8 -*-
diff --git a/pyglider/ncprocess.py b/pyglider/ncprocess.py
index 332e7f4..8d49b50 100644
--- a/pyglider/ncprocess.py
+++ b/pyglider/ncprocess.py
@@ -183,7 +183,7 @@ def extract_timeseries_profiles(inname, outdir, deploymentyaml, force=False):
 
         # add traj_strlen using bare ntcdf to make IOOS happy
         with netCDF4.Dataset(outname, 'r+') as nc:
-            nc.renameDimension('string%d' % trajlen, 'traj_strlen')
+            nc.renameDimension(f'string{trajlen}', 'traj_strlen')
 
 
 def make_gridfiles(
diff --git a/pyglider/seaexplorer.py b/pyglider/seaexplorer.py
index 09e9c15..b15f07a 100644
--- a/pyglider/seaexplorer.py
+++ b/pyglider/seaexplorer.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 """
 SeaExplorer-specific processing routines.
 """
@@ -21,8 +20,8 @@ def _outputname(f, outdir):
     fnout = os.path.basename(f)
     fns = fnout.split('.')
     fns = fns[:5]
-    fns[4] = '%04d' % int(fns[4])
-    fns[1] = '%04d' % int(fns[1])
+    fns[4] = f'{int(fns[4]):04d}'
+    fns[1] = f'{int(fns[1]):04d}'
     fnout = ''
     for ff in fns:
         fnout += ff.lower() + '.'
@@ -128,7 +127,7 @@ def raw_to_rawnc(
             # If no files of this type found, try the next type
             continue
 
-        for ind, f in enumerate(files):
+        for f in files:
             # output name:
             fnout, filenum = _outputname(f, outdir)
             _log.info(f'{f} to {fnout}')
@@ -573,4 +572,4 @@ def raw_to_timeseries(
 raw_to_L1timeseries = raw_to_L0timeseries = raw_to_timeseries
 merge_rawnc = merge_parquet
 
-__all__ = ['raw_to_rawnc', 'merge_parquet', 'raw_to_timeseries']
+__all__ = ['merge_parquet', 'raw_to_rawnc', 'raw_to_timeseries']
diff --git a/pyglider/slocum.py b/pyglider/slocum.py
index 1c5f11a..7db3ae5 100644
--- a/pyglider/slocum.py
+++ b/pyglider/slocum.py
@@ -160,7 +160,7 @@ def _decode_sensor_info(dfh, meta):
     activeSensorList = [{} for i in range(nsensors_used)]
     outlines = []
     sensorInfo = {}
-    for i in range(nsensors_total):
+    for _ in range(nsensors_total):
         line = dfh.readline().decode('utf-8')
         if line.split(':')[0] != 's':
             raise ValueError('Failed to parse sensor info')
@@ -264,7 +264,7 @@ def dbd_get_meta(filename, cachedir):
     # read the cache first. If its not there, try to make one....
     try:
         activeSensorList, sensorInfo = _get_cached_sensorlist(cachedir, meta)
-    except FileNotFoundError:
+    except FileNotFoundError as e:
         if localcache:
             _log.info('No cache file found; trying to create one')
             _make_cache(outlines, cachedir, meta)
@@ -275,7 +275,7 @@
                 'offloaddir/Science/STATE/CACHE/ or ',
                 'offloaddir/Main_board/STATE/CACHE/. ',
                 f'Copy those locally into {cachedir}',
-            )
+            ) from e
     meta['activeSensorList'] = activeSensorList
     # get the file's timestamp...
     meta['_dbdfiletimestamp'] = os.path.getmtime(filename)
@@ -441,8 +441,8 @@ def dbd_to_dict(dinkum_file, cachedir, keys=None):
 
     proctimeend = time.time()
     _log.info(
-        ('%s lines of data read from %s, data rate of %s rows ' 'per second')
-        % (len(data), dinkum_file, len(data) / (proctimeend - proctimestart))
+        f'{len(data)} lines of data read from {dinkum_file}, data rate of {len(data) / (proctimeend - proctimestart)} rows '
+        'per second'
     )
 
     dfh.close()
@@ -605,7 +605,7 @@ def datameta_to_nc(
 
     # make a long string for activeSensorList:
     listst = ''
     for sensor in meta['activeSensorList']:
-        listst += '%s' % sensor
+        listst += str(sensor)
         listst += '\n'
     ds.attrs['activeSensorList'] = listst
@@ -1128,7 +1128,7 @@ def parse_logfiles(files):
 
     for fn in files:
         found_time = False
-        with open(fn, 'r') as fin:
+        with open(fn) as fin:
             for ll in fin:
                 if 'Curr Time:' in ll:
                     times[ntimes] = ll
@@ -1208,7 +1208,7 @@ def parse_logfiles_maybe(files):
 
     for fn in files:
         found_time = False
-        with open(fn, 'r') as fin:
+        with open(fn) as fin:
             for l in fin:
                 if 'Curr Time:' in l:
                     times[ntimes] = l
@@ -1273,7 +1273,7 @@ def parse_logfiles_maybe(files):
 __all__ = [
     'binary_to_rawnc',
     'merge_rawnc',
-    'raw_to_timeseries',
     'parse_gliderState',
     'parse_logfiles',
+    'raw_to_timeseries',
 ]
diff --git a/pyglider/utils.py b/pyglider/utils.py
index fe5db06..483434e 100644
--- a/pyglider/utils.py
+++ b/pyglider/utils.py
@@ -124,7 +124,7 @@ def get_profiles(ds, min_dp=10.0, inversion=3.0, filt_length=7, min_nsamples=14)
     )
     dpall = np.diff(p)
     inflect = np.where(dpall[:-1] * dpall[1:] < 0)[0]
-    for n, i in enumerate(inflect[:-1]):
+    for n, _ in enumerate(inflect[:-1]):
         nprofile = inflect[n + 1] - inflect[n]
         inds = np.arange(good[inflect[n]], good[inflect[n + 1]] + 1) + 1
         dp = np.diff(ds.pressure[inds[[-1, 0]]])
@@ -553,8 +553,8 @@ def fill_metadata(ds, metadata, sensor_data):
     ds.attrs['title'] = ds.attrs['id']
 
     dt = ds.time.values
-    ds.attrs['time_coverage_start'] = '%s' % dt[0]
-    ds.attrs['time_coverage_end'] = '%s' % dt[-1]
+    ds.attrs['time_coverage_start'] = str(dt[0])
+    ds.attrs['time_coverage_end'] = str(dt[-1])
     ds.attrs['processing_level'] = (
         'Level 0 (L0) processed data timeseries; '
         'no corrections or data screening'
@@ -701,7 +701,7 @@ def _parse_gliderxml_pos(fname):
     xmln = fname
     from bs4 import BeautifulSoup
 
-    with open(xmln, 'r') as fin:
+    with open(xmln) as fin:
         y = BeautifulSoup(fin, features='xml')
     time = None
     lon = None
@@ -735,7 +735,7 @@ def _parse_gliderxml_surfacedatetime(fname):
     xmln = fname
     from bs4 import BeautifulSoup
 
-    with open(xmln, 'r') as fin:
+    with open(xmln) as fin:
         y = BeautifulSoup(fin, features='xml')
     time = None
     for a in y.find_all('report'):
@@ -751,8 +751,8 @@ def _parse_gliderxml_surfacedatetime(fname):
 def example_gridplot(
     filename,
     outname,
-    toplot=['potential_temperature', 'salinity', 'oxygen_concentration'],
-    pdenlevels=np.arange(10, 30, 0.5),
+    toplot=['potential_temperature', 'salinity', 'oxygen_concentration'],  # noqa: B006
+    pdenlevels=np.arange(10, 30, 0.5),  # noqa: B008
     dpi=200,
     ylim=None,
 ):
@@ -790,7 +790,7 @@ def _get_deployment(deploymentyaml):
             deploymentyaml,
         ]
     deployment = {}
-    for nn, d in enumerate(deploymentyaml):
+    for d in deploymentyaml:
         with open(d) as fin:
             deployment_ = yaml.safe_load(fin)
         for k in deployment_:
@@ -834,12 +834,12 @@ def _get_glider_name_slocum(current_directory):
 
 
 __all__ = [
+    'fill_metadata',
+    'gappy_fill_vertical',
+    'get_derived_eos_raw',
     'get_distance_over_ground',
     'get_glider_depth',
     'get_profiles_new',
-    'get_derived_eos_raw',
-    'fill_metadata',
     'nmea2deg',
-    'gappy_fill_vertical',
     'oxygen_concentration_correction',
 ]
diff --git a/pyproject.toml b/pyproject.toml
index 774c93b..15473ba 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -15,22 +15,22 @@ filterwarnings = [
 ]
 
 [tool.ruff.lint]
+# See https://docs.astral.sh/ruff/rules/ for all rules
 select = [
-    "F", # pyflakes
-    "I", # isort
-    "E", # Error
+    "F",    # pyflakes
+    "I",    # isort
+    "E",    # Error
+    "B",    # Bugbear
+    "UP",   # pyupgrade
+    "LOG",  # logging
+    "ICN",  # import conventions
+    "G",    # logging-format
+    "RUF",  # ruff
     # "D", # pydocstyle
-    # "B", # Bugbear
-    # "UP", # pyupgrade
-    # "LOG", # logging
-    # "ICN", # import conventions
-    # "G", # logging-format
-    # "RUF", # ruff
 ]
 
 ignore = [
-    # TODO: Remove
-    "E402",  # Module level import not at top of file
+    "RUF005",  # Consider `[*outlines, line]` instead of concatenation
     "F841",  # Local variable `...` is assigned to but never used
     "E722",  # Do not use bare `except`
 
diff --git a/tests/test_seaexplorer.py b/tests/test_seaexplorer.py
index c4b09f5..964c80f 100644
--- a/tests/test_seaexplorer.py
+++ b/tests/test_seaexplorer.py
@@ -6,12 +6,12 @@
 import pytest
 import yaml
 
+import pyglider.seaexplorer as seaexplorer
+
 os.system('rm tests/data/realtime_rawnc/*')
 library_dir = Path(__file__).parent.parent.absolute()
 example_dir = library_dir / 'tests/example-data/'
 
-import pyglider.seaexplorer as seaexplorer
-
 
 def test__outputname():
     fnout, filenum = seaexplorer._outputname(
diff --git a/tests/test_slocum.py b/tests/test_slocum.py
index 565667a..f907f7e 100644
--- a/tests/test_slocum.py
+++ b/tests/test_slocum.py
@@ -118,7 +118,7 @@ def test_profiles_compliant():
         output_format=output_format,
     )
     # Open the JSON output and get the compliance scores
-    with open(output_filename, 'r') as fp:
+    with open(output_filename) as fp:
         cc_data = json.load(fp)
     test = cc_data['gliderdac']
     assert test['high_count'] == 0
@@ -162,7 +162,7 @@ def test_timeseries_compliant():
         output_format=output_format,
     )
     # Open the JSON output and get the compliance scores
-    with open(output_filename, 'r') as fp:
+    with open(output_filename) as fp:
         cc_data = json.load(fp)
     test = cc_data['cf:1.8']
     assert test['high_count'] == 0