@@ -163,10 +163,10 @@ class read(cfdm.read):
     ============ ============================================
     file type    Description
     ============ ============================================
-    ``'netCDF'`` Binary netCDF-3 or netCDF-4 file
-    ``'CDL'``    Text CDL representations of a netCDF dataset
-    ``'Zarr'``   A Zarr v2 (xarray) or Zarr v3 hierarchy
-    ``'UM'``     UM fields file or PP file
+    ``'netCDF'`` A netCDF-3 or netCDF-4 dataset
+    ``'CDL'``    A text CDL file of a netCDF dataset
+    ``'Zarr'``   A Zarr v2 (xarray) or Zarr v3 dataset
+    ``'UM'``     A UM fields file or PP dataset
     ============ ============================================

     .. versionadded:: NEXTVERSION
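The table above documents the values accepted by the reader's `dataset_type` keyword (the same name the commented-out GRIB framework below tests). A minimal usage sketch, assuming that keyword takes one of these strings or a sequence of them, and that files of other types are then skipped:

```python
import cf

# Let the reader recognise any supported type (netCDF, CDL, Zarr, UM/PP)
fields = cf.read("data/file.nc")

# Restrict recognition to particular types from the table
fields = cf.read("data/*", dataset_type=["netCDF", "CDL"])
```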
@@ -566,14 +566,15 @@ def _finalise(self):
         # ----------------------------------------------------------------
         # Aggregate the output fields/domains
         # ----------------------------------------------------------------
-        constructs = self.constructs
-        if self.aggregate and len(constructs) > 1:
+        if self.aggregate and len(self.constructs) > 1:
             aggregate_options = self.aggregate_options
             # Set defaults specific to UM fields
             if UM and "strict_units" not in aggregate_options:
                 aggregate_options["relaxed_units"] = True

-            self.constructs = cf_aggregate(constructs, **aggregate_options)
+            self.constructs = cf_aggregate(
+                self.constructs, **aggregate_options
+            )

         # ----------------------------------------------------------------
         # Add standard names to UM/PP fields (after aggregation)
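At the public API level, the UM-specific default set in this hunk corresponds to a sketch like the following (`cf_aggregate` appears to be the internal alias of `cf.aggregate`, whose documented `relaxed_units` option is the one being defaulted):

```python
import cf

# Read UM/PP fields without aggregating them on the way in
fields = cf.read("data/*.pp", aggregate=False)

# relaxed_units=True lets fields whose units are unparseable (common
# for UM/PP lookup headers) still aggregate when their unit strings match
combined = cf.aggregate(fields, relaxed_units=True)
```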
@@ -611,7 +612,7 @@ def _initialise(self):
         # Initialise the list of output constructs
         if self.field:
             self.constructs = FieldList()
-        else:
+        elif self.domain:
             self.constructs = DomainList()

         # Recognised UM dataset formats
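The switch from a bare `else` to `elif self.domain:` ties each constructs container explicitly to its read mode. At the user level the two modes look like this (using the existing `domain` keyword of `cf.read`):

```python
import cf

# Field reads collect their results in a FieldList ...
fields = cf.read("data/file.nc")

# ... while domain reads collect theirs in a DomainList
domains = cf.read("data/file.nc", domain=True)
```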
@@ -681,6 +682,7 @@ def _read(self, dataset):
         super()._read(dataset)

         if self.dataset_contents is not None:
+            # Successfully read the dataset
             return

         # ------------------------------------------------------------
@@ -727,22 +729,23 @@ def _read(self, dataset):
             self.unique_dataset_categories.add("UM")

         if self.dataset_contents is not None:
+            # Successfully read the dataset
             return

         # ------------------------------------------------------------
         # Try to read as a GRIB dataset
         #
-        # Not yet available! The framework will be:
+        # Not yet available! When (if) the time comes, the framework
+        # will be:
         # ------------------------------------------------------------
+        #
         # if dataset_type is None or dataset_type.intersection(
         #     self.GRIB_dataset_types
         # ):
         #     if not hasattr(self, "grib_read"):
         #         # Initialise the GRIB read function
         #         kwargs = self.kwargs
-        #         grib_kwargs = {
-        #             <ADD SOME CODE HERE>
-        #         }
+        #         grib_kwargs = ...  # <ADD SOME CODE HERE>
         #
         #         self.grib_read = partial(
         #             GRIBRead(self.implementation).read, **grib_kwargs
@@ -759,4 +762,5 @@ def _read(self, dataset):
         #         self.unique_dataset_categories.add("GRIB")
         #
         # if self.dataset_contents is not None:
+        #     # Successfully read the dataset
         #     return
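The `_read` hunks above all follow one shape: try a reader, record the dataset category, and return as soon as `self.dataset_contents` is set, so a future GRIB branch can slot in unchanged. A standalone sketch of that fall-through pattern, with hypothetical reader names:

```python
from typing import Any, Callable, Optional, Sequence, Tuple

def read_any(
    dataset: str,
    readers: Sequence[Tuple[str, Callable[[str], Optional[Any]]]],
) -> Tuple[str, Any]:
    """Try each reader in turn, returning on the first success."""
    for category, reader in readers:
        contents = reader(dataset)
        if contents is not None:
            # Successfully read the dataset
            return category, contents

    raise ValueError(f"Can't interpret {dataset!r} as a supported dataset type")

# e.g. readers = [("netCDF", netcdf_read), ("UM", um_read)]; a
# ("GRIB", grib_read) entry would be appended once that reader exists.
```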