diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst index 02931f5..72d769a 100644 --- a/CONTRIBUTING.rst +++ b/CONTRIBUTING.rst @@ -4,11 +4,6 @@ Contributing to OpenET Thank you for your interest in supporting the OpenET project. -Versioning -========== - -The OpenET project is currently in Beta and the version numbers will be "0.0.X" until a non-Beta release is made. - Coding Conventions ================== diff --git a/openet/core/api.py b/openet/core/api.py deleted file mode 100644 index b3161c2..0000000 --- a/openet/core/api.py +++ /dev/null @@ -1,117 +0,0 @@ -# import sys - -import ee - -# import openet.interp as interp - - -# class API(): -# """""" -# def __init__(self): -# """""" -# pass - - -# TODO: Make this a class eventually -def collection( - et_model, - variable, - collections, - start_date, - end_date, - t_interval, - geometry, - **kwargs - ): - """Generic OpenET Collection - - Parameters - ---------- - self : - et_model : {'ndvi', 'ssebop'} - ET model. - variable : str - - collections : list - GEE satellite image collection IDs. - start_date : str - ISO format inclusive start date (i.e. YYYY-MM-DD). - end_date : str - ISO format exclusive end date (i.e. YYYY-MM-DD). - t_interval : {'daily', 'monthly', 'annual', 'overpass'} - Time interval over which to interpolate and aggregate values. - Selecting 'overpass' will return values only for the overpass dates. - geometry : ee.Geometry - The geometry object will be used to filter the input collections. - kwargs : - - Returns - ------- - ee.ImageCollection - - Notes - ----- - The following is just a basic framework for what needs to happen to - go from input parameters to an output image collection. - A lot of this might make more sense in the init function above. - - """ - - # Load the ET model - if et_model.lower() == 'ndvi': - - # # DEADBEEF - Manually adding OpenET Model to system path - # # This will eventually be handled by import openet modules - # import os - # model_path = os.path.dirname(os.path.dirname(os.path.dirname( - # os.path.abspath(os.path.realpath(__file__))))) - # print(model_path) - # sys.path.insert(0, os.path.join(model_path, 'openet-ndvi-test')) - # print(sys.path) - - try: - import openet.ndvi as model - except ModuleNotFoundError: - print( - '\nThe ET model {} could not be imported'.format(et_model) + - '\nPlease ensure that the model has been installed') - return False - except Exception as e: - print('Unhandled Exception: {}'.format(e)) - raise - - elif et_model.lower() == 'ssebop': - - # # DEADBEEF - Manually adding OpenET Models to system path - # # This will eventually be handled by import openet modules - # import os - # model_path = os.path.dirname(os.path.dirname(os.path.dirname( - # os.path.abspath(os.path.realpath(__file__))))) - # print(model_path) - # sys.path.insert(0, os.path.join(model_path, 'openet-ssebop-test')) - - try: - import openet.ssebop as model - except ModuleNotFoundError: - print( - '\nThe ET model {} could not be imported'.format(et_model) + - '\nPlease ensure that the model has been installed') - return False - except Exception as e: - print('Unhandled Exception: {}'.format(e)) - raise - - else: - # CGM - This could just be a value error exception - raise ValueError('unsupported et_model type') - - variable_coll = model.collection( - variable, - collections, - start_date, - end_date, - t_interval, - geometry, - **kwargs - ) - return variable_coll diff --git a/openet/core/common.py b/openet/core/common.py index d1d9d08..52f2c00 100644 --- a/openet/core/common.py +++ 
b/openet/core/common.py @@ -15,8 +15,7 @@ def landsat_c2_sr_cloud_mask( filter_flag=False, saturated_flag=False, sr_cloud_qa_flag=False, - # cloud_confidence=3, - ): +): """Compute cloud mask for a Landsat Coll. 2 Level 2 (SR) image using multiple approaches Parameters @@ -213,10 +212,7 @@ def landsat_c2_sr_lst_correct(sr_image, ndvi): image_geom = sr_image.geometry() image_extent = image_geom.bounds(1, 'EPSG:4326') - # # Simple clip extent from image geometry bounds - # clip_extent = image_geom.bounds(1, 'EPSG:4326') - - # # Server side approach for getting image extent snapped to the ASTER GED grid + # Server side approach for getting image extent snapped to the ASTER GED grid buffer_cells = 1 cellsize = 0.1 image_xy = ee.Array(image_extent.coordinates().get(0)).transpose().toList() @@ -230,13 +226,7 @@ def landsat_c2_sr_lst_correct(sr_image, ndvi): ymax = ymax.divide(cellsize * buffer_cells).ceil().multiply(cellsize * buffer_cells) clip_extent = ee.Geometry.Rectangle([xmin, ymin, xmax, ymax], 'EPSG:4326', False) - # Landsat image projection for resample/reproject ( - # image_proj = sr_image.projection() - # image_crs = image_proj.crs() - # image_geo = ee.List(ee.Dictionary(ee.Algorithms.Describe(image_proj)).get('transform')) - # Aster Global Emissivity Dataset - ged = ee.Image('NASA/ASTER_GED/AG100_003').clip(clip_extent) veg_emis = 0.99 @@ -276,7 +266,6 @@ def get_matched_c2_t1_image(input_img): ee.String(scene_id.get(0)).cat('_').cat(ee.String(scene_id.get(2))) .cat('_').cat(ee.String(scene_id.get(3))) ) - # scene_id = ee.String(input_img.get('system:index')) # Testing if it is any faster to filter each collection separately # TODO: Test if adding an extra .filterDate() call helps @@ -304,7 +293,6 @@ def get_matched_c2_t1_radiance_image(input_img): ee.String(scene_id.get(0)).cat('_').cat(ee.String(scene_id.get(2))) .cat('_').cat(ee.String(scene_id.get(3))) ) - # scene_id = ee.String(input_img.get('system:index')) # TODO: Fix error when images that are in the T1_L2 collections but not in the T1, # will fail with a .get() error because matched_img is 'None', @@ -433,5 +421,4 @@ def get_matched_c2_t1_radiance_image(input_img): .divide(Rc).add(1.0).log().pow(-1) .multiply(ee.Number(k2.get(spacecraft_id))) .rename('lst') - # .set({'system:time_start': sr_image.get('system:time_start')}) ) diff --git a/openet/core/ensemble.py b/openet/core/ensemble.py index d6397e3..c0c0d41 100644 --- a/openet/core/ensemble.py +++ b/openet/core/ensemble.py @@ -83,11 +83,9 @@ def mad(ensemble_img, made_scale=2): # Map the band names to the model index # The extra combine is to try and account for the ensemble images having # band names that don't map to one of the model names/indexes - # band_dict = ee.Dictionary(model_index) band_dict = ee.Dictionary(model_index).combine(ee.Dictionary.fromLists( output_bands, ee.List.sequence(9, model_count.add(9).subtract(1)))) band_index = model_names.map(lambda x: band_dict.get(x)) - # band_index = ee.List.sequence(1, model_count) # Bit encode the models using the model index values # Build the index array from the ensemble images so that the index @@ -110,22 +108,6 @@ def mad(ensemble_img, made_scale=2): return output_img - # DEADBEEF - # print(utils.point_image_value(ee.Image(images), [-120, 39], scale=1)) - # print(utils.point_image_value(ens_median, [-120, 39], scale=1)) - # print(utils.point_image_value(MADe, [-120, 39], scale=1)) - # print(utils.point_image_value(upper, [-120, 39], scale=1)) - # print(utils.point_image_value(lower, [-120, 39], scale=1)) - # 
print(utils.point_image_value(count_img, [-120, 39], scale=1)) - # print(utils.point_image_value(sort_img, [-120, 39], scale=1)) - # print(utils.point_image_value(model_drop_mean, [-120, 39], scale=1)) - - # # Add mean, ens_median, made, upper and lower to ensemble for map display - # ens_mean = ensemble_img.reduce(ee.Reducer.mean()).rename(["mean"]) - # ensemble = ensemble_sims_crop.addBands(ens_mean).addBands(ens_median) - # .addBands(MADe).addBands(upper).addBands(lower) - # .addBands(model_drop_mean) - def mean(ensemble_img): """Simple arithmetic mean placeholder function diff --git a/openet/core/interpolate.py b/openet/core/interpolate.py index ffaa0c4..0f20a2f 100644 --- a/openet/core/interpolate.py +++ b/openet/core/interpolate.py @@ -1,12 +1,10 @@ import datetime import logging -# import pprint from dateutil.relativedelta import relativedelta import ee from . import utils -# import openet.core.utils as utils RESAMPLE_METHODS = ['nearest', 'bilinear', 'bicubic'] @@ -112,14 +110,13 @@ def daily( ) ) - # # DEADBEEF - This module is assuming that the time band is already in - # # the source collection. - # # Uncomment the following to add a time band here instead. + # # This module is assuming that the time band is already in the source collection + # # Uncomment the following to add a time band here instead # def add_utc0_time_band(image): # date_0utc = utils.date_0utc(ee.Date(image.get('system:time_start'))) # return image.addBands([ - # image.select([0]).double().multiply(0).add(date_0utc.millis())\ - # .rename(['time'])]) + # image.select([0]).double().multiply(0).add(date_0utc.millis()).rename(['time']) + # ]) # source_coll = ee.ImageCollection(source_coll.map(add_utc0_time_band)) if interp_method.lower() == 'linear': @@ -148,8 +145,6 @@ def _linear(image): # All filtering will be done based on 0 UTC dates utc0_date = utils.date_0utc(target_date) - # utc0_time = target_date.update(hour=0, minute=0, second=0)\ - # .millis().divide(1000).floor().multiply(1000) time_img = ee.Image.constant(utc0_date.millis()).double() # Build nodata images/masks that can be placed at the front/back of @@ -158,16 +153,12 @@ def _linear(image): prev_qm_mask = ( ee.Image.constant(ee.List.repeat(1, bands.length())) .double().rename(bands).updateMask(0) - .set({ - 'system:time_start': utc0_date.advance(-interp_days - 1, 'day').millis(), - }) + .set({'system:time_start': utc0_date.advance(-interp_days - 1, 'day').millis()}) ) next_qm_mask = ( ee.Image.constant(ee.List.repeat(1, bands.length())) .double().rename(bands).updateMask(0) - .set({ - 'system:time_start': utc0_date.advance(interp_days + 2, 'day').millis(), - }) + .set({'system:time_start': utc0_date.advance(interp_days + 2, 'day').millis()}) ) if use_joins: @@ -201,16 +192,9 @@ def _linear(image): # Flatten the previous/next collections to single images # The closest image in time should be on "top" - # CGM - Is the previous collection already sorted? 
- # prev_qm_img = prev_qm_coll.mosaic() prev_qm_img = prev_qm_coll.sort('system:time_start', True).mosaic() next_qm_img = next_qm_coll.sort('system:time_start', False).mosaic() - # DEADBEEF - It might be easier to interpolate all bands instead of - # separating the value and time bands - # prev_value_img = ee.Image(prev_qm_img).double() - # next_value_img = ee.Image(next_qm_img).double() - # Interpolate all bands except the "time" band prev_bands = prev_qm_img.bandNames().filter(ee.Filter.notEquals('item', 'time')) next_bands = next_qm_img.bandNames().filter(ee.Filter.notEquals('item', 'time')) @@ -247,15 +231,6 @@ def _linear(image): # Pass the target image back out as a new band target_img = image.select([0]).double() - # CGM - This approach might work but the time/mask bands tend to be masked - # but the current expectation is that the reference will not be masked - # # Map the target values onto the interpolated image - # # Apply resampling if needed and rename to match the target image - # if resample_method in ['bilinear', 'bicubic']: - # target_img = target_img.resample(resample_method) - # target_band = ee.String(ee.List(image.select([0]).bandNames()).get(0)) - # target_img = interp_img.multiply(0).double().add(target_img).rename(target_band) - # TODO: Come up with a dynamic way to name the "product" bands # The product bands will have a "_1" appended to the name # i.e. "et_fraction" -> "et_fraction_1" @@ -271,12 +246,9 @@ def _linear(image): return output_img.set({ 'system:index': image.get('system:index'), 'system:time_start': image.get('system:time_start'), - # 'system:time_start': utc0_time, - }) + }) interp_coll = ee.ImageCollection(target_coll.map(_linear)) - # elif interp_method.lower() == 'nearest': - # interp_coll = ee.ImageCollection(target_coll.map(_nearest)) else: raise ValueError(f'invalid interpolation method: {interp_method}') @@ -288,7 +260,7 @@ def aggregate_to_daily( start_date=None, end_date=None, agg_type='mean', - ): +): """Aggregate images by day without using joins The primary purpose of this function is to join separate Landsat images @@ -346,8 +318,8 @@ def aggregate_func(date_str): if agg_type.lower() == 'mean': agg_img = agg_coll.mean() - # elif agg_type.lower() == 'median': - # agg_img = agg_coll.median() + elif agg_type.lower() == 'median': + agg_img = agg_coll.median() else: raise ValueError(f'unsupported agg_type "{agg_type}"') @@ -459,7 +431,7 @@ def from_scene_et_fraction( elif interp_method.lower() not in ['linear']: raise ValueError(f'unsupported interp_method: {interp_method}') - if ((type(interp_days) is str or type(interp_days) is float) and + if (((type(interp_days) is str) or (type(interp_days) is float)) and utils.is_number(interp_days)): interp_days = int(interp_days) elif not type(interp_days) is int: @@ -479,13 +451,6 @@ def from_scene_et_fraction( end_dt -= relativedelta(days=+1) end_dt = datetime.datetime(end_dt.year, end_dt.month, 1) end_dt += relativedelta(months=+1) - # elif t_interval.lower() == 'annual': - # start_dt = datetime.datetime(start_dt.year, 1, 1) - # # Covert end date to inclusive, flatten to beginning of year, - # # then add a year which will make it exclusive - # end_dt -= relativedelta(days=+1) - # end_dt = datetime.datetime(end_dt.year, 1, 1) - # end_dt += relativedelta(years=+1) start_date = start_dt.strftime('%Y-%m-%d') end_date = end_dt.strftime('%Y-%m-%d') @@ -534,7 +499,7 @@ def from_scene_et_fraction( et_reference_factor = model_args['et_reference_factor'] else: et_reference_factor = 1.0 - 
logging.debug('interp_factor was not set, default to 1.0') + logging.debug('et_reference_factor was not set, default to 1.0') if 'et_reference_resample' in model_args.keys(): et_reference_resample = model_args['et_reference_resample'].lower() @@ -672,7 +637,6 @@ def interpolate_prep(img): interp_days=interp_days, use_joins=use_joins, compute_product=False, - # resample_method=et_reference_resample, ) # The interpolate.daily() function can/will return the product of @@ -698,7 +662,7 @@ def compute_et(img): daily_coll = daily_coll.map(compute_et) - # CGM - This function is being declared here to avoid passing in all the common parameters + # This function is being declared here to avoid passing in all the common parameters # such as: daily_coll, daily_et_ref_coll, interp_properties, variables, etc. # Long term it should probably be declared outside of this function # so it can be called directly and tested separately @@ -780,7 +744,6 @@ def aggregate_image(agg_start_date, agg_end_date, date_format): if mask_partial_aggregations: aggregation_days = ee.Date(agg_end_date).difference(ee.Date(agg_start_date), 'day') aggregation_count_mask = aggregation_count_img.gte(aggregation_days.subtract(1)) - # aggregation_count_mask = agg_count_img.gte(aggregation_days) output_img = output_img.updateMask(aggregation_count_mask) return ( @@ -789,7 +752,6 @@ def aggregate_image(agg_start_date, agg_end_date, date_format): 'system:index': ee.Date(agg_start_date).format(date_format), 'system:time_start': ee.Date(agg_start_date).millis() }) - # .set(interp_properties) ) # Combine input, interpolated, and derived values @@ -806,7 +768,7 @@ def agg_daily(daily_img): # It should be since it is coming from the interpolate source # collection, but what if source is GRIDMET (+6 UTC)? 
agg_start_date = ee.Date(daily_img.get('system:time_start')) - # CGM - This calls .sum() on collections with only one image + # This calls .sum() on collections with only one image return aggregate_image( agg_start_date=agg_start_date, agg_end_date=ee.Date(agg_start_date).advance(1, 'day'), @@ -959,13 +921,6 @@ def from_scene_et_actual( end_dt -= relativedelta(days=+1) end_dt = datetime.datetime(end_dt.year, end_dt.month, 1) end_dt += relativedelta(months=+1) - # elif t_interval.lower() == 'annual': - # start_dt = datetime.datetime(start_dt.year, 1, 1) - # # Covert end date to inclusive, flatten to beginning of year, - # # then add a year which will make it exclusive - # end_dt -= relativedelta(days=+1) - # end_dt = datetime.datetime(end_dt.year, 1, 1) - # end_dt += relativedelta(years=+1) start_date = start_dt.strftime('%Y-%m-%d') end_date = end_dt.strftime('%Y-%m-%d') @@ -981,8 +936,6 @@ def from_scene_et_actual( raise ValueError('interp_source was not set') if 'interp_band' not in interp_args.keys(): raise ValueError('interp_band was not set') - if 'interp_factor' in interp_args.keys() and interp_args['interp_factor'] != 1: - raise ValueError('interp_factor is not currently support or applied') if 'interp_resample' in interp_args.keys(): interp_resample = interp_args['interp_resample'].lower() @@ -1182,12 +1135,6 @@ def normalize_et(img): et_norm_img = et_norm_img.min(float(interp_args['et_fraction_max'])) if 'et_fraction_min' in interp_args.keys(): et_norm_img = et_norm_img.max(float(interp_args['et_fraction_min'])) - # if ('et_fraction_min' in interp_args.keys() and - # 'et_fraction_max' in interp_args.keys()): - # et_norm_img = et_norm_img.clamp( - # float(interp_args['et_fraction_min']), - # float(interp_args['et_fraction_max']) - # ) return img.addBands([et_norm_img.double(), target_img.rename(['norm'])]) @@ -1218,12 +1165,11 @@ def normalize_et(img): # # if 'et' in variables or 'et_fraction' in variables: # def compute_et(img): # """This function assumes ETr and ETf are present""" - # et_img = img.select(['et_norm']).multiply( - # img.select(['et_reference'])) + # et_img = img.select(['et_norm']).multiply(img.select(['et_reference'])) # return img.addBands(et_img.double().rename('et')) # daily_coll = daily_coll.map(compute_et) - # CGM - This function is being declared here to avoid passing in all the common parameters + # This function is being declared here to avoid passing in all the common parameters # such as: daily_coll, daily_et_ref_coll, interp_properties, variables, etc. # Long term it should probably be declared outside of this function # so it can be called directly and tested separately @@ -1327,11 +1273,11 @@ def aggregate_image(agg_start_date, agg_end_date, date_format): )) elif t_interval.lower() == 'daily': def agg_daily(daily_img): - # CGM - Double check that this time_start is a 0 UTC time. + # TODO: Double check that this time_start is a 0 UTC time # It should be since it is coming from the interpolate source # collection, but what if source is GRIDMET (+6 UTC)? 
agg_start_date = ee.Date(daily_img.get('system:time_start')) - # CGM - This calls .sum() on collections with only one image + # This calls .sum() on collections with only one image return aggregate_image( agg_start_date=agg_start_date, agg_end_date=ee.Date(agg_start_date).advance(1, 'day'), diff --git a/openet/core/landsat.py b/openet/core/landsat.py index 6fe7967..86a8561 100644 --- a/openet/core/landsat.py +++ b/openet/core/landsat.py @@ -8,7 +8,7 @@ def c02_qa_pixel_mask( shadow_flag=True, snow_flag=False, water_flag=False, - ): +): """Landsat Collection 2 QA_PIXEL band cloud mask Parameters @@ -78,11 +78,13 @@ def c02_qa_pixel_mask( ---------- https://prd-wret.s3.us-west-2.amazonaws.com/assets/palladium/production/atoms/files/LSDS-1328_Landsat8-9-OLI-TIRS-C2-L2-DFCB-v6.pdf - """ qa_img = input_img.select(['QA_PIXEL']) mask_img = qa_img.rightShift(3).bitwiseAnd(1).neq(0) - # .Or(qa_img.rightShift(8).bitwiseAnd(3).gte(cloud_confidence)) + # The following line could be added to the mask_img call + # to include the cloud confidence bits + # .Or(qa_img.rightShift(8).bitwiseAnd(3).gte(cloud_confidence)) + if cirrus_flag: mask_img = mask_img.Or(qa_img.rightShift(2).bitwiseAnd(1).neq(0)) if dilate_flag: @@ -120,8 +122,6 @@ def c02_cloud_score_mask(input_img, cloud_score_pct=100): # Using the system:index requires an extra map call but might be more robust # since the other properties may have been dropped toa_coll = c02_matched_toa_coll(input_img, 'system:index', 'system:index') - # toa_coll = landsat_c2_l2_matched_toa_coll(input_img, 'LANDSAT_SCENE_ID', 'LANDSAT_SCENE_ID') - # toa_coll = landsat_c2_l2_matched_toa_coll(input_img) output = ee.Algorithms.If( toa_coll.size().gt(0), @@ -227,7 +227,7 @@ def c02_matched_toa_coll( input_img, image_property='LANDSAT_SCENE_ID', match_property='LANDSAT_SCENE_ID', - ): +): """Return the Landsat Collection 2 TOA collection matching an image property Parameters @@ -251,14 +251,11 @@ def c02_matched_toa_coll( """ - # Filter TOA collections to the target to image UTC day + # Filter TOA collections to the target image UTC day # This filter range could be a lot tighter but keeping it to the day makes it easier to test # and will hopefully not impact the performance too much start_date = ee.Date(input_img.get('system:time_start')).update(hour=0, minute=0, second=0) end_date = start_date.advance(1, 'day') - # # Buffer the image time_start +/- 30 minutes (this could probably be set tighter) - # start_date = ee.Date(input_img.get('system:time_start')).advance(-0.5, 'hour') - # end_date = start_date.advance(1, 'hour') l5_coll = ee.ImageCollection('LANDSAT/LT05/C02/T1_TOA').filterDate(start_date, end_date) l7_coll = ee.ImageCollection('LANDSAT/LE07/C02/T1_TOA').filterDate(start_date, end_date) @@ -268,8 +265,6 @@ def c02_matched_toa_coll( # The default system:index gets modified when the collections are merged below, # so save the system:index to a new "scene_id" property and use that for matching if match_property == 'system:index': - # def set_scene_id(img): - # return img.set('scene_id', img.get('system:index')) l5_coll = l5_coll.map(lambda img: img.set('scene_id', img.get('system:index'))) l7_coll = l7_coll.map(lambda img: img.set('scene_id', img.get('system:index'))) l8_coll = l8_coll.map(lambda img: img.set('scene_id', img.get('system:index'))) @@ -286,7 +281,7 @@ def c02_matched_l2_coll( input_img, image_property='LANDSAT_SCENE_ID', match_property='LANDSAT_SCENE_ID', - ): +): """Return the Landsat Collection 2 Level 2 collection matching an image property 
Parameters @@ -315,9 +310,6 @@ def c02_matched_l2_coll( # and will hopefully not impact the performance too much start_date = ee.Date(input_img.get('system:time_start')).update(hour=0, minute=0, second=0) end_date = start_date.advance(1, 'day') - # # Buffer the image time_start +/- 30 minutes (this could probably be set tighter) - # start_date = ee.Date(input_img.get('system:time_start')).advance(-0.5, 'hour') - # end_date = start_date.advance(1, 'hour') l5_coll = ee.ImageCollection('LANDSAT/LT05/C02/T1_L2').filterDate(start_date, end_date) l7_coll = ee.ImageCollection('LANDSAT/LE07/C02/T1_L2').filterDate(start_date, end_date) @@ -327,8 +319,6 @@ def c02_matched_l2_coll( # The default system:index gets modified when the collections are merged below, # so save the system:index to a new "scene_id" property and use that for matching if match_property == 'system:index': - # def set_scene_id(img): - # return img.set('scene_id', img.get('system:index')) l5_coll = l5_coll.map(lambda img: img.set('scene_id', img.get('system:index'))) l7_coll = l7_coll.map(lambda img: img.set('scene_id', img.get('system:index'))) l8_coll = l8_coll.map(lambda img: img.set('scene_id', img.get('system:index'))) diff --git a/openet/core/tests/test_a_utils.py b/openet/core/tests/test_a_utils.py index 802af2f..ff6bc5d 100644 --- a/openet/core/tests/test_a_utils.py +++ b/openet/core/tests/test_a_utils.py @@ -16,6 +16,7 @@ def arg_valid_date_exception(): assert utils.arg_valid_date('3/10/2010') +# TODO: Write this test # def arg_valid_file(): # assert False @@ -88,10 +89,12 @@ def test_date_range_skip_leap_days(start_dt, end_dt, skip_leap_days, expected): start_dt, end_dt, skip_leap_days=skip_leap_days))) == expected +# TODO: Write this test # def test_delay_task(): # assert False +# TODO: Write this test # def test_get_ee_assets(): # assert False @@ -101,10 +104,12 @@ def test_get_ee_assets_exception(): assert utils.get_ee_assets('deadbeef', retries=1) +# TODO: Write this test # def test_get_ee_tasks(): # assert False +# TODO: Write this test # def test_ee_task_start(): # assert False @@ -232,9 +237,7 @@ def test_constant_image_value_multiband_bands(expected=10.123456789, tol=0.00000 ] ) def test_point_image_value(image_id, xy, scale, expected, tol): - output = utils.point_image_value( - ee.Image(image_id).select(['elevation'], ['output']), xy, scale - ) + output = utils.point_image_value(ee.Image(image_id).select(['elevation'], ['output']), xy, scale) assert abs(output['output'] - expected) <= tol diff --git a/openet/core/tests/test_common.py b/openet/core/tests/test_common.py index ab08d1a..ce7f1d7 100644 --- a/openet/core/tests/test_common.py +++ b/openet/core/tests/test_common.py @@ -1,5 +1,3 @@ -# import pprint - import ee import pytest diff --git a/openet/core/tests/test_ensemble.py b/openet/core/tests/test_ensemble.py index ade765c..ab1610c 100644 --- a/openet/core/tests/test_ensemble.py +++ b/openet/core/tests/test_ensemble.py @@ -1,20 +1,9 @@ -# import datetime -# import logging -# import pprint - import ee import pytest import openet.core.ensemble as ensemble import openet.core.utils as utils -# logging.basicConfig(level=logging.DEBUG, format='%(message)s') - - -# TODO: Write a test to check that the output bandnames -# def test_mad_bandname(): -# assert False - @pytest.mark.parametrize( "model_values, made_scale, expected", @@ -87,7 +76,7 @@ [[224, 231, 210, 194, 235, 242], 2, 228.4], # Check that the scale term does something - # CGM - This might be better as a separate test function + # This might be better 
as a separate test function [[87, 55, 23, 25, 41, 12], 2.5, 40.5], # Check that dropping an image works (dropped SIMS None) @@ -95,11 +84,9 @@ ] ) def test_mad_values(model_values, made_scale, expected, tol=0.001): - print(model_values) # TODO: Check if using constant images is faster and works images = [] - mask_img = ee.Image('IDAHO_EPSCOR/GRIDMET/20200101')\ - .select(['tmmx']).multiply(0) + mask_img = ee.Image('IDAHO_EPSCOR/GRIDMET/20200101').select(['tmmx']).multiply(0) for i, value in enumerate(model_values): if value is None: images.append(mask_img.updateMask(0).rename([f'B{i+1}'])) @@ -126,10 +113,8 @@ def test_mad_values(model_values, made_scale, expected, tol=0.001): ] ) def test_mad_other_stats(model_values, made_scale, mn, mx, count, tol=0.001): - # print(model_values) images = [] - mask_img = ee.Image('IDAHO_EPSCOR/GRIDMET/20200101')\ - .select(['tmmx']).multiply(0) + mask_img = ee.Image('IDAHO_EPSCOR/GRIDMET/20200101').select(['tmmx']).multiply(0) for i, value in enumerate(model_values): if value is None: images.append(mask_img.updateMask(0).rename([f'B{i+1}'])) @@ -147,34 +132,22 @@ def test_mad_other_stats(model_values, made_scale, mn, mx, count, tol=0.001): "model_values, made_scale, index", [ # Same test values as in stats test - [{'disalexi': 118, 'eemetric': 111, 'geesebal': 57, 'ptjpl': 75, - 'sims': 99, 'ssebop': 58}, 2, 63], - [{'disalexi': 87, 'eemetric': 55, 'geesebal': 23, 'ptjpl': 25, - 'sims': 41, 'ssebop': 12}, 2, 62], - [{'disalexi': 55, 'eemetric': 23, 'geesebal': 25, 'ptjpl': 41, - 'sims': None, 'ssebop': 12}, 2, 47], - [{'disalexi': 100, 'eemetric': 23, 'geesebal': 25, 'ptjpl': 41, - 'sims': None, 'ssebop': 12}, 2, 46], - [{'disalexi': 0, 'eemetric': 0, 'geesebal': 1, 'ptjpl': 15, - 'sims': None, 'ssebop': 0}, 2, 39], - [{'disalexi': 0, 'eemetric': 0, 'geesebal': 13, 'ptjpl': 8, - 'sims': None, 'ssebop': 0}, 2, 43], - [{'disalexi': 0, 'eemetric': 0, 'geesebal': 0, 'ptjpl': 12, - 'sims': None, 'ssebop': 0}, 2, 39], + [{'disalexi': 118, 'eemetric': 111, 'geesebal': 57, 'ptjpl': 75, 'sims': 99, 'ssebop': 58}, 2, 63], + [{'disalexi': 87, 'eemetric': 55, 'geesebal': 23, 'ptjpl': 25, 'sims': 41, 'ssebop': 12}, 2, 62], + [{'disalexi': 55, 'eemetric': 23, 'geesebal': 25, 'ptjpl': 41, 'sims': None, 'ssebop': 12}, 2, 47], + [{'disalexi': 100, 'eemetric': 23, 'geesebal': 25, 'ptjpl': 41, 'sims': None, 'ssebop': 12}, 2, 46], + [{'disalexi': 0, 'eemetric': 0, 'geesebal': 1, 'ptjpl': 15, 'sims': None, 'ssebop': 0}, 2, 39], + [{'disalexi': 0, 'eemetric': 0, 'geesebal': 13, 'ptjpl': 8, 'sims': None, 'ssebop': 0}, 2, 43], + [{'disalexi': 0, 'eemetric': 0, 'geesebal': 0, 'ptjpl': 12, 'sims': None, 'ssebop': 0}, 2, 39], # Check that 5 band image with SIMS totally excluded - [{'disalexi': 55, 'eemetric': 23, 'geesebal': 25, 'ptjpl': 41, - 'ssebop': 12}, 2, 47], + [{'disalexi': 55, 'eemetric': 23, 'geesebal': 25, 'ptjpl': 41, 'ssebop': 12}, 2, 47], # Check that order doesn't matter - [{'eemetric': 23, 'sims': None, 'ptjpl': 41, 'disalexi': 55, - 'geesebal': 25, 'ssebop': 12}, 2, 47], - + [{'eemetric': 23, 'sims': None, 'ptjpl': 41, 'disalexi': 55, 'geesebal': 25, 'ssebop': 12}, 2, 47], ] ) def test_mad_index(model_values, made_scale, index): - # print(model_values) images = [] - mask_img = ee.Image('IDAHO_EPSCOR/GRIDMET/20200101')\ - .select(['tmmx']).multiply(0) + mask_img = ee.Image('IDAHO_EPSCOR/GRIDMET/20200101').select(['tmmx']).multiply(0) for name, value in model_values.items(): if value is None: images.append(mask_img.updateMask(0).rename([name])) @@ -196,8 +169,6 @@ 
def test_mad_index(model_values, made_scale, index): ] ) def test_mean_values(model_values, expected, tol=0.0001): - # input_img = ee.Image.constant(int(img_value, 2)).rename(['BQA']) - # input_args = {'input_img': input_img} images = [] for value in model_values: if value is None: diff --git a/openet/core/tests/test_interpolate.py b/openet/core/tests/test_interpolate.py index a99a0bc..476c707 100644 --- a/openet/core/tests/test_interpolate.py +++ b/openet/core/tests/test_interpolate.py @@ -1,6 +1,5 @@ import datetime import logging -import pprint import ee import pytest @@ -16,8 +15,6 @@ def test_ee_init(): def tgt_image(tgt_value, tgt_time): - # tgt_images = [] - # for tgt_value, tgt_time in zip(tgt_values, tgt_timess): return ( ee.Image.constant(tgt_value).rename(['tgt']) .set({'system:time_start': tgt_time, @@ -96,28 +93,15 @@ def scene_coll(variables, etf=[0.4, 0.4, 0.4], et=[5, 5, 5], ndvi=[0.6, 0.6, 0.6 # since they are now added in the interpolation calls scene_coll = ee.ImageCollection.fromImages([ ee.Image([img.add(etf[0]), img.add(et[0]), img.add(ndvi[0])]) - .rename(['et_fraction', 'et', 'ndvi']) - .set({'system:index': 'LE07_044033_20170708', 'system:time_start': time1}), + .rename(['et_fraction', 'et', 'ndvi']) + .set({'system:index': 'LE07_044033_20170708', 'system:time_start': time1}), ee.Image([img.add(etf[1]), img.add(et[1]), img.add(ndvi[1])]) - .rename(['et_fraction', 'et', 'ndvi']) - .set({'system:index': 'LC08_044033_20170716', 'system:time_start': time2}), + .rename(['et_fraction', 'et', 'ndvi']) + .set({'system:index': 'LC08_044033_20170716', 'system:time_start': time2}), ee.Image([img.add(etf[2]), img.add(et[2]), img.add(ndvi[2])]) - .rename(['et_fraction', 'et', 'ndvi']) - .set({'system:index': 'LE07_044033_20170724', 'system:time_start': time3}), + .rename(['et_fraction', 'et', 'ndvi']) + .set({'system:index': 'LE07_044033_20170724', 'system:time_start': time3}), ]) - # # Mask and time bands currently get added on to the scene collection - # # and images are unscaled just before interpolating in the export tool - # scene_coll = ee.ImageCollection.fromImages([ - # ee.Image([img.add(etf[0]), img.add(et[0]), img.add(ndvi[0]), img.add(date1), mask]) - # .rename(['et_fraction', 'et', 'ndvi', 'time', 'mask']) - # .set({'system:index': 'LE07_044033_20170708', 'system:time_start': time1}), - # ee.Image([img.add(etf[1]), img.add(et[1]), img.add(ndvi[1]), img.add(date2), mask]) - # .rename(['et_fraction', 'et', 'ndvi', 'time', 'mask']) - # .set({'system:index': 'LC08_044033_20170716', 'system:time_start': time2}), - # ee.Image([img.add(etf[2]), img.add(et[2]), img.add(ndvi[2]), img.add(date3), mask]) - # .rename(['et_fraction', 'et', 'ndvi', 'time', 'mask']) - # .set({'system:index': 'LE07_044033_20170724', 'system:time_start': time3}), - # ]) return scene_coll.select(variables) @@ -173,8 +157,7 @@ def test_daily_collection(tgt_value, tgt_time, src_values, src_times, expected, """Test the daily method for collections of constant images""" tgt_coll = ee.ImageCollection([tgt_image(tgt_value, tgt_time)]) src_coll = ee.ImageCollection.fromImages(src_images(src_values, src_times)) - output_coll = interpolate.daily( - tgt_coll, src_coll, interp_days=32, interp_method='linear', use_joins=False) + output_coll = interpolate.daily(tgt_coll, src_coll, interp_days=32, interp_method='linear', use_joins=False) output = utils.constant_image_value(ee.Image(output_coll.first())) assert abs(output['src'] - expected) <= tol assert abs(output['tgt'] - tgt_value) <= tol @@ -189,8 +172,7 @@ def 
test_daily_collection(tgt_value, tgt_time, src_values, src_times, expected, def test_daily_compute_product_true(tgt_value, tgt_time, src_values, src_times, expected, tol=0.01): """Test if the compute_product flag returns the product bands""" tgt_coll = ee.ImageCollection([tgt_image(tgt_value, tgt_time)]) - src_coll = ee.ImageCollection.fromImages( - src_images(src_values, src_times)) + src_coll = ee.ImageCollection.fromImages(src_images(src_values, src_times)) output_coll = interpolate.daily( tgt_coll, src_coll, interp_days=32, interp_method='linear', use_joins=False, compute_product=True) @@ -224,8 +206,7 @@ def test_daily_use_joins_true(tgt_value, tgt_time, src_values, src_times, expect """Test that output with use_joins=True is the same as use_joins=False""" tgt_coll = ee.ImageCollection([tgt_image(tgt_value, tgt_time)]) src_coll = ee.ImageCollection.fromImages(src_images(src_values, src_times)) - output_coll = interpolate.daily( - tgt_coll, src_coll, interp_days=32, interp_method='linear', use_joins=True) + output_coll = interpolate.daily(tgt_coll, src_coll, interp_days=32, interp_method='linear', use_joins=True) output = utils.constant_image_value(ee.Image(output_coll.first())) assert abs(output['src'] - expected) <= tol assert abs(output['tgt'] - tgt_value) <= tol @@ -287,8 +268,8 @@ def test_daily_small_interp_days(interp_days, tgt_value, tgt_time, src_values, tgt_coll = ee.ImageCollection([tgt_image(tgt_value, tgt_time)]) src_coll = ee.ImageCollection.fromImages(src_images(src_values, src_times)) output_coll = ee.ImageCollection(interpolate.daily( - tgt_coll, src_coll, interp_days=interp_days, interp_method='linear', - use_joins=False)) + tgt_coll, src_coll, interp_days=interp_days, interp_method='linear', use_joins=False + )) output = utils.constant_image_value(ee.Image(output_coll.first())) if expected is None: assert output['src'] is None @@ -351,8 +332,8 @@ def test_daily_interp_days_use_joins(interp_days, tgt_value, tgt_time, tgt_coll = ee.ImageCollection([tgt_image(tgt_value, tgt_time)]) src_coll = ee.ImageCollection.fromImages(src_images(src_values, src_times)) output_coll = ee.ImageCollection(interpolate.daily( - tgt_coll, src_coll, interp_days=interp_days, interp_method='linear', - use_joins=True)) + tgt_coll, src_coll, interp_days=interp_days, interp_method='linear', use_joins=True + )) output = utils.constant_image_value(ee.Image(output_coll.first())) if expected is None: assert output['src'] is None @@ -379,9 +360,7 @@ def test_daily_interp_days_use_joins(interp_days, tgt_value, tgt_time, def test_aggregate_to_daily_values_single_band(src_values, time_values, expected, tol=0.01): """Test daily aggregation function for single-band constant images""" image_list = src_images(src_values, time_values) - time_list = [ - utils.date_0utc(ee.Date(time)).millis().getInfo() - for time in time_values] + time_list = [utils.date_0utc(ee.Date(time)).millis().getInfo() for time in time_values] # Dates can be ISO Date string or milliseconds since epoch # Use the date strings to get 0 UTC dates and better match model calls @@ -391,8 +370,8 @@ def test_aggregate_to_daily_values_single_band(src_values, time_values, expected # end_date = max(time_values) src_coll = interpolate.aggregate_to_daily( - ee.ImageCollection.fromImages(image_list), - start_date, end_date, agg_type='mean') + ee.ImageCollection.fromImages(image_list), start_date, end_date, agg_type='mean' + ) src_image = ee.Image(src_coll.first()) output = utils.constant_image_value(src_image) @@ -416,11 +395,12 @@ def 
test_aggregate_to_daily_values_multi_band(src_values, time_values, expected, time_list = [] for src, time in zip(src_values, time_values): time_0utc = utils.date_0utc(ee.Date(time)).millis() - image = ee.Image.constant(src).double()\ - .addBands(ee.Image.constant(time_0utc).double())\ - .rename(['etrf', 'etof', 'time'])\ - .set({'system:index': ee.Date(time).format('yyyyMMdd'), - 'system:time_start': time}) + image = ( + ee.Image.constant(src).double() + .addBands(ee.Image.constant(time_0utc).double()) + .rename(['etrf', 'etof', 'time']) + .set({'system:index': ee.Date(time).format('yyyyMMdd'), 'system:time_start': time}) + ) image_list.append(image) time_list.append(time_0utc.getInfo()) @@ -428,14 +408,11 @@ def test_aggregate_to_daily_values_multi_band(src_values, time_values, expected, # Use the date strings to get 0 UTC dates and better match model calls start_date = ee.Date(min(time_values)).format('yyyy-MM-dd') end_date = ee.Date(max(time_values)).advance(1, 'day').format('yyyy-MM-dd') - # start_date = min(time_values) - # end_date = max(time_values) output_coll = interpolate.aggregate_to_daily( - ee.ImageCollection.fromImages(image_list), - start_date, end_date, agg_type='mean') - output_image = ee.Image(output_coll.first())\ - .select([0, 1, 2], ['etrf', 'etof', 'time']) + ee.ImageCollection.fromImages(image_list), start_date, end_date, agg_type='mean' + ) + output_image = ee.Image(output_coll.first()).select([0, 1, 2], ['etrf', 'etof', 'time']) output = utils.constant_image_value(output_image) assert abs(output['etrf'] - expected[0]) <= tol @@ -445,21 +422,24 @@ def test_aggregate_to_daily_values_multi_band(src_values, time_values, expected, def test_aggregate_to_daily_properties(): """Test daily aggregation image properties""" - source_coll = ee.ImageCollection('LANDSAT/LC08/C02/T1_TOA')\ - .filterDate('2017-06-30', '2017-08-02')\ + source_coll = ( + ee.ImageCollection('LANDSAT/LC08/C02/T1_TOA') + .filterDate('2017-06-30', '2017-08-02') .filterBounds(ee.Geometry.Point(-121.9, 39)) + ) output = utils.get_info(interpolate.aggregate_to_daily(source_coll).first()) - assert set(output['properties'].keys()) == { - 'date', 'system:index', 'system:time_start'} + assert set(output['properties'].keys()) == {'date', 'system:index', 'system:time_start'} assert output['properties']['date'] == '2017-06-30' def test_aggregate_to_daily_date_filtering(): """Test daily aggregation start/end date filtering""" - source_coll = ee.ImageCollection('LANDSAT/LC08/C02/T1_TOA')\ - .filterDate('2017-01-01', '2018-01-01')\ - .filterBounds(ee.Geometry.Point(-121.9, 39))\ + source_coll = ( + ee.ImageCollection('LANDSAT/LC08/C02/T1_TOA') + .filterDate('2017-01-01', '2018-01-01') + .filterBounds(ee.Geometry.Point(-121.9, 39)) .select(['B1']) + ) # First test if both start and end date are set output = utils.get_info(interpolate.aggregate_to_daily( @@ -487,8 +467,8 @@ def test_from_scene_et_fraction_t_interval_daily_values_interpolated(tol=0.0001) model_args={'et_reference_source': 'IDAHO_EPSCOR/GRIDMET', 'et_reference_band': 'eto', 'et_reference_resample': 'nearest'}, - t_interval='daily') - + t_interval='daily', + ) TEST_POINT = (-121.5265, 38.7399) output = utils.point_coll_value(output_coll, TEST_POINT, scale=30) assert abs(output['ndvi']['2017-07-01'] - 0.2) <= tol @@ -522,8 +502,8 @@ def test_from_scene_et_fraction_t_interval_daily_values_et_reference( model_args={'et_reference_source': 'IDAHO_EPSCOR/GRIDMET', 'et_reference_band': et_reference_band, 'et_reference_resample': 'nearest'}, - t_interval='daily') 
- + t_interval='daily', + ) TEST_POINT = (-121.5265, 38.7399) output = utils.point_coll_value(output_coll, TEST_POINT, scale=30) assert abs(output['et_reference'][et_reference_date] - et_reference) <= tol @@ -547,8 +527,8 @@ def test_from_scene_et_fraction_t_interval_monthly_values( model_args={'et_reference_source': 'IDAHO_EPSCOR/GRIDMET', 'et_reference_band': et_reference_band, 'et_reference_resample': 'nearest'}, - t_interval='monthly') - + t_interval='monthly', + ) TEST_POINT = (-121.5265, 38.7399) output = utils.point_coll_value(output_coll, TEST_POINT, scale=30) assert abs(output['ndvi']['2017-07-01'] - 0.6) <= tol @@ -567,8 +547,8 @@ def test_from_scene_et_fraction_t_interval_custom_values(tol=0.0001): model_args={'et_reference_source': 'IDAHO_EPSCOR/GRIDMET', 'et_reference_band': 'etr', 'et_reference_resample': 'nearest'}, - t_interval='custom') - + t_interval='custom', + ) TEST_POINT = (-121.5265, 38.7399) output = utils.point_coll_value(output_coll, TEST_POINT, scale=30) assert abs(output['ndvi']['2017-07-01'] - 0.6) <= tol @@ -587,8 +567,8 @@ def test_from_scene_et_fraction_t_interval_custom_daily_count(): model_args={'et_reference_source': 'IDAHO_EPSCOR/GRIDMET', 'et_reference_band': 'etr', 'et_reference_resample': 'nearest'}, - t_interval='custom') - + t_interval='custom', + ) TEST_POINT = (-121.5265, 38.7399) output = utils.point_coll_value(output_coll, TEST_POINT, scale=30) assert output['daily_count']['2017-07-01'] == 31 @@ -604,8 +584,8 @@ def test_from_scene_et_fraction_t_interval_custom_mask_partial_aggregations_true model_args={'et_reference_source': 'IDAHO_EPSCOR/GRIDMET', 'et_reference_band': 'etr', 'et_reference_resample': 'nearest'}, - t_interval='custom') - + t_interval='custom', + ) TEST_POINT = (-121.5265, 38.7399) output = utils.point_coll_value(output_coll, TEST_POINT, scale=30) assert output['daily_count']['2017-07-01'] is None @@ -621,8 +601,8 @@ def test_from_scene_et_fraction_t_interval_custom_mask_partial_aggregations_fals model_args={'et_reference_source': 'IDAHO_EPSCOR/GRIDMET', 'et_reference_band': 'etr', 'et_reference_resample': 'nearest'}, - t_interval='custom') - + t_interval='custom', + ) TEST_POINT = (-121.5265, 38.7399) output = utils.point_coll_value(output_coll, TEST_POINT, scale=30) # CGM - 3 Landsat scenes with +/-2 days around each should be 15 days @@ -640,8 +620,8 @@ def test_from_scene_et_fraction_t_interval_monthly_et_reference_factor(tol=0.000 'et_reference_band': 'etr', 'et_reference_factor': 0.5, 'et_reference_resample': 'nearest'}, - t_interval='monthly') - + t_interval='monthly', + ) TEST_POINT = (-121.5265, 38.7399) output = utils.point_coll_value(output_coll, TEST_POINT, scale=30) assert abs(output['ndvi']['2017-07-01'] - 0.6) <= tol @@ -668,8 +648,8 @@ def test_from_scene_et_fraction_t_interval_monthly_et_reference_resample( model_args={'et_reference_source': 'IDAHO_EPSCOR/GRIDMET', 'et_reference_band': et_reference_band, 'et_reference_resample': 'bilinear'}, - t_interval='monthly') - + t_interval='monthly', + ) TEST_POINT = (-121.5265, 38.7399) output = utils.point_coll_value(output_coll, TEST_POINT, scale=30) assert abs(output['ndvi']['2017-07-01'] - 0.6) <= tol @@ -692,8 +672,8 @@ def test_from_scene_et_fraction_t_interval_monthly_interp_args_et_reference(tol= 'et_reference_band': 'etr', 'et_reference_resample': 'nearest'}, model_args={}, - t_interval='monthly') - + t_interval='monthly', + ) TEST_POINT = (-121.5265, 38.7399) output = utils.point_coll_value(output_coll, TEST_POINT, scale=30) assert 
abs(output['ndvi']['2017-07-01'] - 0.6) <= tol @@ -715,8 +695,8 @@ def test_from_scene_et_actual_t_interval_daily_values_eto(tol=0.0001): model_args={'et_reference_source': 'IDAHO_EPSCOR/GRIDMET', 'et_reference_band': 'eto', 'et_reference_resample': 'nearest'}, - t_interval='daily') - + t_interval='daily', + ) TEST_POINT = (-121.5265, 38.7399) output = utils.point_coll_value(output_coll, TEST_POINT, scale=30) assert abs(output['et_fraction']['2017-07-10'] - 0.5970309972763062) <= tol @@ -740,8 +720,8 @@ def test_from_scene_et_actual_t_interval_daily_values_etr(tol=0.0001): model_args={'et_reference_source': 'IDAHO_EPSCOR/GRIDMET', 'et_reference_band': 'etr', 'et_reference_resample': 'nearest'}, - t_interval='daily') - + t_interval='daily', + ) TEST_POINT = (-121.5265, 38.7399) output = utils.point_coll_value(output_coll, TEST_POINT, scale=30) assert abs(output['et_fraction']['2017-07-10'] - 0.449444979429245) <= tol @@ -773,8 +753,8 @@ def test_from_scene_et_actual_t_interval_monthly_values( model_args={'et_reference_source': 'IDAHO_EPSCOR/GRIDMET', 'et_reference_band': et_reference_band, 'et_reference_resample': 'nearest'}, - t_interval='monthly') - + t_interval='monthly', + ) TEST_POINT = (-121.5265, 38.7399) output = utils.point_coll_value(output_coll, TEST_POINT, scale=30) assert abs(output['et']['2017-07-01'] - et) <= tol @@ -796,8 +776,8 @@ def test_from_scene_et_actual_t_interval_custom_values_monthly(tol=0.0001): model_args={'et_reference_source': 'IDAHO_EPSCOR/GRIDMET', 'et_reference_band': 'etr', 'et_reference_resample': 'nearest'}, - t_interval='custom') - + t_interval='custom', + ) TEST_POINT = (-121.5265, 38.7399) output = utils.point_coll_value(output_coll, TEST_POINT, scale=30) assert abs(output['et']['2017-07-01'] - 142.9622039794922) <= tol @@ -819,8 +799,8 @@ def test_from_scene_et_actual_t_interval_custom_daily_count(): model_args={'et_reference_source': 'IDAHO_EPSCOR/GRIDMET', 'et_reference_band': 'etr', 'et_reference_resample': 'nearest'}, - t_interval='custom') - + t_interval='custom', + ) TEST_POINT = (-121.5265, 38.7399) output = utils.point_coll_value(output_coll, TEST_POINT, scale=30) assert output['daily_count']['2017-07-01'] == 31 @@ -840,8 +820,8 @@ def test_from_scene_et_actual_t_interval_custom_mask_partial_aggregations_true() model_args={'et_reference_source': 'IDAHO_EPSCOR/GRIDMET', 'et_reference_band': 'etr', 'et_reference_resample': 'nearest'}, - t_interval='custom') - + t_interval='custom', + ) TEST_POINT = (-121.5265, 38.7399) output = utils.point_coll_value(output_coll, TEST_POINT, scale=30) assert output['daily_count']['2017-07-01'] is None @@ -861,8 +841,8 @@ def test_from_scene_et_actual_t_interval_custom_mask_partial_aggregations_false( model_args={'et_reference_source': 'IDAHO_EPSCOR/GRIDMET', 'et_reference_band': 'etr', 'et_reference_resample': 'nearest'}, - t_interval='custom') - + t_interval='custom', + ) TEST_POINT = (-121.5265, 38.7399) output = utils.point_coll_value(output_coll, TEST_POINT, scale=30) assert output['daily_count']['2017-07-01'] == 18 @@ -880,8 +860,8 @@ def test_from_scene_et_actual_t_interval_monthly_et_reference_factor(tol=0.0001) 'et_reference_band': 'etr', 'et_reference_factor': 0.5, 'et_reference_resample': 'nearest'}, - t_interval='monthly') - + t_interval='monthly', + ) TEST_POINT = (-121.5265, 38.7399) output = utils.point_coll_value(output_coll, TEST_POINT, scale=30) assert abs(output['et']['2017-07-01'] - 142.9622039794922) <= tol @@ -910,8 +890,8 @@ def 
test_from_scene_et_actual_t_interval_monthly_et_reference_resample( model_args={'et_reference_source': 'IDAHO_EPSCOR/GRIDMET', 'et_reference_band': et_reference_band, 'et_reference_resample': 'bilinear'}, - t_interval='monthly') - + t_interval='monthly', + ) TEST_POINT = (-121.5265, 38.7399) output = utils.point_coll_value(output_coll, TEST_POINT, scale=30) assert abs(output['et']['2017-07-01'] - et) <= tol @@ -933,8 +913,8 @@ def test_from_scene_et_actual_t_interval_monthly_interp_args_et_reference(tol=0. 'et_reference_band': 'etr', 'et_reference_resample': 'nearest'}, model_args={}, - t_interval='monthly') - + t_interval='monthly', + ) TEST_POINT = (-121.5265, 38.7399) output = utils.point_coll_value(output_coll, TEST_POINT, scale=30) assert abs(output['et']['2017-07-01'] - 142.9622039794922) <= tol @@ -957,8 +937,8 @@ def test_from_scene_et_actual_t_interval_daily_et_fraction_max(tol=0.0001): model_args={'et_reference_source': 'IDAHO_EPSCOR/GRIDMET', 'et_reference_band': 'etr', 'et_reference_resample': 'nearest'}, - t_interval='daily') - + t_interval='daily', + ) TEST_POINT = (-121.5265, 38.7399) output = utils.point_coll_value(output_coll, TEST_POINT, scale=30) assert abs(output['et_fraction']['2017-07-10'] - 1.4) <= tol @@ -975,7 +955,8 @@ def test_from_scene_et_fraction_t_interval_bad_value(): 'et_reference_band': 'etr', 'et_reference_factor': 0.5, 'et_reference_resample': 'nearest'}, - t_interval='deadbeef') + t_interval='deadbeef', + ) def test_from_scene_et_fraction_t_interval_no_value(): @@ -989,7 +970,8 @@ def test_from_scene_et_fraction_t_interval_no_value(): model_args={'et_reference_source': 'IDAHO_EPSCOR/GRIDMET', 'et_reference_band': 'etr', 'et_reference_factor': 0.5, - 'et_reference_resample': 'nearest'}) + 'et_reference_resample': 'nearest'}, + ) def test_from_scene_et_actual_t_interval_bad_value(): @@ -1004,7 +986,8 @@ def test_from_scene_et_actual_t_interval_bad_value(): model_args={'et_reference_source': 'IDAHO_EPSCOR/GRIDMET', 'et_reference_band': 'etr', 'et_reference_resample': 'nearest'}, - t_interval='deadbeef') + t_interval='deadbeef', + ) def test_from_scene_et_actual_t_interval_no_value(): @@ -1019,7 +1002,8 @@ def test_from_scene_et_actual_t_interval_no_value(): 'interp_resample': 'nearest'}, model_args={'et_reference_source': 'IDAHO_EPSCOR/GRIDMET', 'et_reference_band': 'etr', - 'et_reference_resample': 'nearest'}) + 'et_reference_resample': 'nearest'}, + ) def test_from_scene_et_fraction_interp_args_use_joins_true(tol=0.01): @@ -1032,8 +1016,8 @@ def test_from_scene_et_fraction_interp_args_use_joins_true(tol=0.01): model_args={'et_reference_source': 'IDAHO_EPSCOR/GRIDMET', 'et_reference_band': 'etr', 'et_reference_resample': 'nearest'}, - t_interval='monthly') - + t_interval='monthly', + ) TEST_POINT = (-121.5265, 38.7399) output = utils.point_coll_value(output_coll, TEST_POINT, scale=30) assert abs(output['et_reference']['2017-07-01'] - 310.3) <= tol @@ -1051,8 +1035,8 @@ def test_from_scene_et_fraction_interp_args_use_joins_false(tol=0.01): model_args={'et_reference_source': 'IDAHO_EPSCOR/GRIDMET', 'et_reference_band': 'etr', 'et_reference_resample': 'nearest'}, - t_interval='monthly') - + t_interval='monthly', + ) TEST_POINT = (-121.5265, 38.7399) output = utils.point_coll_value(output_coll, TEST_POINT, scale=30) assert abs(output['et_reference']['2017-07-01'] - 310.3) <= tol @@ -1072,8 +1056,8 @@ def test_from_scene_et_actual_interp_args_use_joins_true(tol=0.01): model_args={'et_reference_source': 'IDAHO_EPSCOR/GRIDMET', 'et_reference_band': 'etr', 
'et_reference_resample': 'nearest'}, - t_interval='monthly') - + t_interval='monthly', + ) TEST_POINT = (-121.5265, 38.7399) output = utils.point_coll_value(output_coll, TEST_POINT, scale=30) assert abs(output['et']['2017-07-01'] - 142.9622039794922) <= tol @@ -1093,135 +1077,10 @@ def test_from_scene_et_actual_interp_args_use_joins_false(tol=0.01): model_args={'et_reference_source': 'IDAHO_EPSCOR/GRIDMET', 'et_reference_band': 'etr', 'et_reference_resample': 'nearest'}, - t_interval='monthly') - + t_interval='monthly', + ) TEST_POINT = (-121.5265, 38.7399) output = utils.point_coll_value(output_coll, TEST_POINT, scale=30) assert abs(output['et']['2017-07-01'] - 142.9622039794922) <= tol assert abs(output['et_reference']['2017-07-01'] - 310.3) <= tol assert output['count']['2017-07-01'] == 3 - - -""" -These tests were attempts at making "full" interpolation calls. -They could be removed but are being left in case we want to explore this again -at some point in the future. -""" -# def test_daily_values_collection_a(): -# """Test the daily interpolation using real images""" -# target_coll = ( -# ee.ImageCollection('IDAHO_EPSCOR/GRIDMET') -# .filterDate('2017-06-30', '2017-08-02') -# .select(['etr']) -# ) -# source_coll = ( -# ee.ImageCollection('LANDSAT/LC08/C02/T1_L2') -# .filterDate('2017-06-30', '2017-08-02') -# .filterBounds(ee.Geometry.Point(-121.9, 39)) -# .select(['SR_B2']) -# ) -# -# def add_time_band(image): -# date_0utc = utils.date_0utc(ee.Date(image.get('system:time_start'))) -# return image.addBands([ -# image.select([0]).double().multiply(0).add(date_0utc.millis()).rename(['time']) -# ]) -# source_coll = ee.ImageCollection(source_coll.map(add_time_band)) -# -# # print('\nTARGET') -# # target_info = utils.point_coll_value( -# # target_coll, xy=(-121.5265, 38.7399), scale=30) -# # pprint.pprint(target_info) -# -# print('\nSOURCE') -# source_info = utils.point_coll_value( -# source_coll, xy=(-121.5265, 38.7399), scale=30) -# pprint.pprint(source_info) -# -# print('\nOUTPUT') -# output = utils.point_coll_value( -# interpolate.daily(target_coll, source_coll, interp_days=32), -# xy=(-121.5265, 38.7399), scale=30) -# pprint.pprint(output['SR_B2']) -# -# assert True -# -# -# def test_daily_values_collection_b(): -# """Test the daily interpolation for short interp_days values""" -# target_coll = ( -# ee.ImageCollection('IDAHO_EPSCOR/GRIDMET') -# .filterDate('2017-06-30', '2017-08-02') -# .select(['etr']) -# ) -# source_coll = ( -# ee.ImageCollection('LANDSAT/LC08/C02/T1_L2') -# .filterDate('2017-06-30', '2017-08-02') -# .filterBounds(ee.Geometry.Point(-121.9, 39)) -# .select(['SR_B2']) -# ) -# -# def add_time_band(image): -# date_0utc = utils.date_0utc(ee.Date(image.get('system:time_start'))) -# return image.addBands([ -# image.select([0]).double().multiply(0).add(date_0utc.millis()).rename(['time']) -# ]) -# source_coll = ee.ImageCollection(source_coll.map(add_time_band)) -# -# # print('\nTARGET') -# # target_info = utils.point_coll_value( -# # target_coll, xy=(-121.5265, 38.7399), scale=30) -# # pprint.pprint(target_info) -# -# print('\nSOURCE') -# source_info = utils.point_coll_value( -# source_coll, xy=(-121.5265, 38.7399), scale=30) -# pprint.pprint(source_info) -# -# print('\nOUTPUT') -# output = utils.point_coll_value( -# interpolate.daily(target_coll, source_coll, interp_days=4), -# xy=(-121.5265, 38.7399), scale=30) -# pprint.pprint(output['SR_B2']) -# -# assert True -# -# -# def test_daily_values_collection_c(): -# """Test if the daily interpolation holds the last known 
value""" -# target_coll = ( -# ee.ImageCollection('IDAHO_EPSCOR/GRIDMET') -# .filterDate('2017-07-01', '2017-08-05') -# .select(['etr']) -# ) -# source_coll = ( -# ee.ImageCollection('LANDSAT/LC08/C02/T1_L2') -# .filterDate('2017-06-30', '2017-07-17') -# .filterBounds(ee.Geometry.Point(-121.9, 39)) -# .select(['SR_B2']) -# ) -# -# def add_time_band(image): -# date_0utc = utils.date_0utc(ee.Date(image.get('system:time_start'))) -# return image.addBands([ -# image.select([0]).double().multiply(0).add(date_0utc.millis()).rename(['time']) -# ]) -# source_coll = ee.ImageCollection(source_coll.map(add_time_band)) -# -# # print('\nTARGET') -# # target_info = utils.point_coll_value( -# # target_coll, xy=(-121.5265, 38.7399), scale=30) -# # pprint.pprint(target_info) -# -# print('\nSOURCE') -# source_info = utils.point_coll_value( -# source_coll, xy=(-121.5265, 38.7399), scale=30) -# pprint.pprint(source_info) -# -# print('\nOUTPUT') -# output = utils.point_coll_value( -# interpolate.daily(target_coll, source_coll, interp_days=16), -# xy=(-121.5265, 38.7399), scale=30) -# pprint.pprint(output['SR_B2']) -# -# assert True diff --git a/openet/core/utils.py b/openet/core/utils.py index 30d11b6..7cf3399 100644 --- a/openet/core/utils.py +++ b/openet/core/utils.py @@ -28,11 +28,10 @@ def affine_transform(image): def arg_valid_date(input_date): """Check that a date string is ISO format (YYYY-MM-DD) - This function is used to check the format of dates entered as command - line arguments. - DEADBEEF - It would probably make more sense to have this function - parse the date using dateutil parser (http://labix.org/python-dateutil) - and return the ISO format string + This function is used to check the format of dates entered as command line arguments + It would probably make more sense to have this function + parse the date using dateutil parser (http://labix.org/python-dateutil) + and return the ISO format string Parameters ---------- @@ -61,7 +60,6 @@ def arg_valid_file(file_path): """ if os.path.isfile(os.path.abspath(os.path.realpath(file_path))): return os.path.abspath(os.path.realpath(file_path)) - # return file_path else: raise argparse.ArgumentTypeError(f'{file_path} does not exist') @@ -80,10 +78,6 @@ def date_0utc(date): """ return ee.Date.fromYMD(date.get('year'), date.get('month'), date.get('day')) - # Extra operations are needed since update() does not set milliseconds to 0. 
-    return ee.Date(date.update(hour=0, minute=0, second=0).millis()\
-        .divide(1000).floor().multiply(1000))
-
 
 def date_range(start_dt, end_dt, days=1, skip_leap_days=False):
     """Generate dates within a range (inclusive)
@@ -141,7 +135,7 @@ def delay_task(delay_time=0, max_ready=-1):
         time.sleep(delay_time)
     elif max_ready > 0:
         # Don't continue to the next export until the number of READY tasks
-        # is greater than or equal to "max_ready"
+        # is less than "max_ready"
         # Force delay_time to be at least 10 seconds if max_ready is set
         #   to avoid excessive EE calls
@@ -154,8 +148,6 @@ def delay_task(delay_time=0, max_ready=-1):
         while True:
             ready_tasks = get_ee_tasks(states=['READY'], verbose=True)
             ready_task_count = len(ready_tasks.keys())
-            # logging.debug('  Ready tasks: {}'.format(
-            #     ', '.join(sorted(ready_tasks.keys()))))
 
             if ready_task_count >= max_ready:
                 logging.debug('  {} tasks queued, waiting {} seconds to start '
@@ -200,9 +192,7 @@ def get_ee_assets(asset_id, start_dt=None, end_dt=None, retries=6):
         except ValueError:
             raise Exception('\nThe collection or folder does not exist, exiting')
         except Exception as e:
-            logging.warning(
-                f'  Error getting asset list, retrying ({i}/{retries})\n  {e}'
-            )
+            logging.warning(f'  Error getting asset list, retrying ({i}/{retries})\n  {e}')
             time.sleep((i+1) ** 2)
 
     if asset_id_list is None:
@@ -238,9 +228,7 @@ def get_ee_tasks(states=['RUNNING', 'READY'], verbose=False, retries=6):
             # task_list = ee.data.listOperations()
             break
         except Exception as e:
-            logging.warning(
-                f'  Error getting task list, retrying ({i}/{retries})\n  {e}'
-            )
+            logging.warning(f'  Error getting task list, retrying ({i}/{retries})\n  {e}')
             time.sleep((i+1) ** 2)
     if task_list is None:
         raise Exception('\nUnable to retrieve task list, exiting')
@@ -249,10 +237,6 @@ def get_ee_tasks(states=['RUNNING', 'READY'], verbose=False, retries=6):
         [task for task in task_list if task['state'] in states],
         key=lambda t: (t['state'], t['description'], t['id'])
     )
-    # task_list = sorted([
-    #     [t['state'], t['description'], t['id']] for t in task_list
-    #     if t['state'] in states
-    # ])
 
     # Convert the task list to a dictionary with the task name as the key
     return {task['description']: task for task in task_list}
@@ -288,7 +272,6 @@ def print_ee_tasks(tasks):
                 (update_dt - start_dt).total_seconds() / 3600, task['id'])
             )
-        # elif task['state'] in states:
         else:
             logging.debug('  {:8s} {}'.format(task['state'], task['description']))
@@ -330,26 +313,6 @@ def get_info(ee_obj, max_retries=4):
     return output
 
 
-# def getinfo(ee_obj, n=4):
-#     """Make an exponential back off getInfo call on an Earth Engine object"""
-#     output = None
-#     for i in range(1, n):
-#         try:
-#             output = ee_obj.getInfo()
-#         except ee.ee_exception.EEException as e:
-#             if 'Earth Engine memory capacity exceeded' in str(e):
-#                 logging.info('  Resending query ({}/10)'.format(i))
-#                 logging.debug('  {}'.format(e))
-#                 time.sleep(i ** 2)
-#             else:
-#                 raise e
-#
-#         if output:
-#             break
-#
-#     return output
-
-
 def ee_task_start(task, n=6):
     """Make an exponential backoff Earth Engine request"""
     for i in range(1, n):
         try:
             task.start()
             break
         except Exception as e:
             logging.info('  Resending query ({}/{})'.format(i, n))
             logging.debug('  {}'.format(e))
             time.sleep(i ** 2)
-        except ee.ee_exception.EEException as e:
-            if ('Earth Engine memory capacity exceeded' in str(e) or
-                    'Earth Engine capacity exceeded' in str(e)):
-                logging.info('  Resending query ({}/10)'.format(i))
-                logging.debug('  {}'.format(e))
-                time.sleep(i ** 2)
-            else:
-                raise e
return task @@ -423,33 +378,10 @@ def parse_int_set(nputstr=""): except: # not an int and not a range... invalid.add(i) - # Report invalid tokens before returning valid selection - # print "Invalid set: " + str(invalid) return selection -# def wrs2_list_2_str(tiles): -# """Compact string representation of the WRS2 tile list""" -# from collections import defaultdict -# tile_dict = defaultdict(list) -# for tile in tiles: -# tile_dict[int(tile[1:4])].append(int(tile[5:8])) -# tile_dict = {k: sorted(v) for k, v in tile_dict.items()} -# return json.dumps(tile_dict, sort_keys=True) \ -# .replace('"', '').replace(' ', '')\ -# .replace('{', '').replace('}', '') -# -# -# def wrs2_str_2_list(tile_str): -# tile_set = set() -# for t in tile_str.replace('[', '').split('],'): -# path = int(t.split(':')[0]) -# for row in t.split(':')[1].replace(']', '').split(','): -# tile_set.add('p{:03d}r{:03d}'.format(path, int(row))) -# return sorted(list(tile_set)) - - # These functions support writing WRS2 path/row dictionary collapsed to ranges def wrs2_set_2_str(tiles): """Convert WRS2 tile set to a compact string/dictionary representation""" @@ -462,9 +394,11 @@ def wrs2_set_2_str(tiles): k: '[{}]'.format(list_2_str_ranges(v)) for k, v in tile_dict.items()} # tile_dict = {k: sorted(v) for k, v in tile_dict.items()} - tile_str = json.dumps(tile_dict, sort_keys=True) \ - .replace('"', '').replace(' ', '') \ + tile_str = ( + json.dumps(tile_dict, sort_keys=True) + .replace('"', '').replace(' ', '') .replace('{', '').replace('}', '') + ) return tile_str
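For reviewers of the ensemble.py changes above: the mad() index band bit-encodes which models survive the MADe outlier filtering, and the test_mad_index expectations are consistent with the six models mapping alphabetically to bits 0-5 (e.g. 47 = 0b101111 is every model except sims). A small decoding sketch under that assumed mapping; the authoritative assignment is the model_index dictionary in ensemble.py:

```python
# Bit assignment inferred from the test_mad_index expectations above;
# the authoritative mapping is the model_index dictionary in ensemble.py
MODEL_BITS = {'disalexi': 0, 'eemetric': 1, 'geesebal': 2, 'ptjpl': 3, 'sims': 4, 'ssebop': 5}

def decode_ensemble_index(index_value):
    """Return the set of model names encoded in a MAD ensemble index value."""
    return {name for name, bit in MODEL_BITS.items() if (index_value >> bit) & 1}

# 47 = 0b101111 -> everything but sims, matching the test rows where sims is None
print(sorted(decode_ensemble_index(47)))
# ['disalexi', 'eemetric', 'geesebal', 'ptjpl', 'ssebop']
```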
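The interpolate.daily() cleanup keeps the same linear scheme: each target date gets the previous and next unmasked scene values weighted by their time offsets. A minimal plain-Python sketch of that weighting (illustrative only, not the Earth Engine implementation):

```python
from datetime import date

def linear_interp(target_day, prev_day, prev_value, next_day, next_value):
    """Interpolate a value for target_day from the bracketing scene values."""
    span = (next_day - prev_day).days
    if span == 0:
        return prev_value
    weight = (target_day - prev_day).days / span
    return prev_value + (next_value - prev_value) * weight

# Scene values of 0.0 on 2017-07-08 and 1.6 on 2017-07-16 give 0.4 on 2017-07-10
print(linear_interp(date(2017, 7, 10), date(2017, 7, 8), 0.0, date(2017, 7, 16), 1.6))
```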
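aggregate_to_daily() now supports the previously commented-out 'median' aggregation alongside 'mean'. A rough pure-Python sketch of the day-grouping and reduction it performs on an image collection (names illustrative; the real function reduces ee.ImageCollection objects):

```python
import statistics
from collections import defaultdict

def aggregate_to_daily(timestamped_values, agg_type='mean'):
    """Group (iso_date, value) pairs by day and reduce each group."""
    groups = defaultdict(list)
    for day, value in timestamped_values:
        groups[day].append(value)
    if agg_type.lower() == 'mean':
        reducer = statistics.fmean
    elif agg_type.lower() == 'median':
        reducer = statistics.median
    else:
        raise ValueError(f'unsupported agg_type "{agg_type}"')
    return {day: reducer(values) for day, values in sorted(groups.items())}

# Two Landsat scenes acquired on the same UTC day collapse to a single value
print(aggregate_to_daily([('2017-07-08', 0.2), ('2017-07-08', 0.4)], agg_type='mean'))
```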
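The c02_qa_pixel_mask() shift-and-AND calls in landsat.py decode single QA_PIXEL bits. Per the Collection 2 document referenced in the docstring: bit 1 is dilated cloud, bit 2 cirrus, bit 3 cloud, bit 4 cloud shadow, bit 5 snow, bit 7 water, and bits 8-9 the two-bit cloud confidence. The same decoding on a plain integer:

```python
def decode_qa_pixel(qa):
    """Decode the Landsat Collection 2 QA_PIXEL flags used by c02_qa_pixel_mask()."""
    return {
        'dilated_cloud': (qa >> 1) & 1,
        'cirrus': (qa >> 2) & 1,            # qa_img.rightShift(2).bitwiseAnd(1)
        'cloud': (qa >> 3) & 1,             # qa_img.rightShift(3).bitwiseAnd(1)
        'cloud_shadow': (qa >> 4) & 1,
        'snow': (qa >> 5) & 1,
        'water': (qa >> 7) & 1,
        'cloud_confidence': (qa >> 8) & 3,  # two-bit value, 0-3
    }

# Cloud bit set with high (3) cloud confidence
print(decode_qa_pixel(0b1100001000))
```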
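get_ee_assets(), get_ee_tasks(), get_info(), and ee_task_start() in utils.py all repeat the same retry pattern: attempt the call, log the failure, then sleep a quadratically growing delay. A generic sketch of that pattern (the wrapper name is illustrative, not part of the module):

```python
import logging
import time

def call_with_backoff(func, retries=6):
    """Retry func() with the quadratic backoff used throughout utils.py."""
    for i in range(retries):
        try:
            return func()
        except Exception as e:
            logging.warning(f'  Error on attempt {i + 1}/{retries}\n  {e}')
            time.sleep((i + 1) ** 2)  # 1, 4, 9, 16, ... seconds
    raise Exception('\nUnable to complete request, exiting')

# Example usage with any flaky Earth Engine call:
# task_list = call_with_backoff(lambda: ee.data.getTaskList(), retries=6)
```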
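The last hunk reformats wrs2_set_2_str(), which collapses a WRS2 tile set into a compact path:[row-ranges] string via a list_2_str_ranges() helper that is not shown in this diff. A guess at that helper's behavior, assuming it renders consecutive rows as ranges (e.g. [33, 34, 36] -> '33-34,36'):

```python
def list_2_str_ranges(values):
    """Collapse sorted integers into a '33-34,36' style string (assumed behavior)."""
    values = sorted(set(values))
    ranges = []
    start = prev = values[0]
    for v in values[1:] + [None]:
        if v is not None and v == prev + 1:
            prev = v
            continue
        ranges.append(str(start) if start == prev else f'{start}-{prev}')
        if v is not None:
            start = prev = v
    return ','.join(ranges)

# wrs2_set_2_str({'p042r033', 'p042r034', 'p042r036'}) would then produce
# something like '42:[33-34,36]' after the json.dumps character cleanup
print(list_2_str_ranges([33, 34, 36]))
```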