diff --git a/intertidal/composites.py b/intertidal/composites.py index 0f798f6..93791d2 100644 --- a/intertidal/composites.py +++ b/intertidal/composites.py @@ -65,6 +65,20 @@ def tidal_thresholds( return tide_thresh_low, tide_thresh_high +def filter_granules(dataset): + """ + Return False for any Sentinel-2 dataset with an MGRS + granule region code in the list of bad region codes. + """ + drop_list = ["50HKG", "50HNF", "51LWD", "51LXE", "51LZF", + "52LBL", "52LCL", "52LDK", "53HNA", "53LRC", + "54GYU", "54LWR", "54LXR", "54LYR", "55GBP", + "55KEA", "55KFV", "55KGV", "55KHT", "55KHU", + "56KKC", "56KLC", "56KMC", "56KMV", "56KNU", + "54LWQ", "54LWP"] + return dataset.metadata.region_code not in drop_list + + def tidal_composites( satellite_ds, threshold_lowtide=0.15, @@ -459,6 +473,8 @@ def tidal_composites_cli( geom = None # Load satellite data and dataset IDs for metadata + # Use `filter_granules` predicate function to drop a list of + # custom Sentinel-2 MGRS granules with poor data coverage satellite_ds, dss_s2, _ = load_data( dc=dc, study_area=study_area, @@ -476,6 +492,7 @@ def tidal_composites_cli( skip_broken_datasets=True, dataset_maturity="final", dtype="int16", + dataset_predicate=filter_granules, ) log.info( f"{run_id}: Found {len(satellite_ds.time)} satellite data timesteps" diff --git a/tests/README.md b/tests/README.md index 30524c1..2b56048 100644 --- a/tests/README.md +++ b/tests/README.md @@ -10,7 +10,7 @@ Integration tests This directory contains tests that are run to verify that DEA Intertidal code runs correctly. The ``test_intertidal.py`` file runs a small-scale full workflow analysis over an intertidal flat in the Gulf of Carpentaria using the DEA Intertidal [Command Line Interface (CLI) tools](../notebooks/Intertidal_CLI.ipynb), and compares these results against a LiDAR validation DEM to produce some simple accuracy metrics. -The latest integration test completed at **2025-05-02 17:14**. 
Compared to the previous run, it had an: +The latest integration test completed at **2025-05-05 11:48**. Compared to the previous run, it had an: - RMSE accuracy of **0.14 m ( :heavy_minus_sign: no change)** - MAE accuracy of **0.12 m ( :heavy_minus_sign: no change)** - Bias of **0.12 m ( :heavy_minus_sign: no change)** diff --git a/tests/validation.csv b/tests/validation.csv index 3bf3fec..2c19898 100644 --- a/tests/validation.csv +++ b/tests/validation.csv @@ -110,3 +110,4 @@ time,Correlation,RMSE,MAE,R-squared,Bias,Regression slope 2025-05-02 01:43:13.002005+00:00,0.975,0.145,0.123,0.95,0.117,1.119 2025-05-02 06:32:51.594612+00:00,0.975,0.145,0.123,0.95,0.117,1.119 2025-05-02 07:14:19.583638+00:00,0.975,0.145,0.123,0.95,0.117,1.119 +2025-05-05 01:48:26.455865+00:00,0.975,0.145,0.123,0.95,0.117,1.119 diff --git a/tests/validation.jpg b/tests/validation.jpg index 4bd4dac..bdae3cf 100644 Binary files a/tests/validation.jpg and b/tests/validation.jpg differ