Skip to content

Commit 0a4eb7e

Browse files
committed
dev
1 parent 488f52e commit 0a4eb7e

File tree

3 files changed

+23
-21
lines changed

3 files changed

+23
-21
lines changed

cf/data/array/zarrarray.py

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,11 +1,12 @@
11
import cfdm
22

33
from ...mixin_container import Container
4-
#from .mixin import ActiveStorageMixin
4+
5+
# from .mixin import ActiveStorageMixin
56

67

78
class ZarrArray(
8-
# ActiveStorageMixin,
9+
# ActiveStorageMixin,
910
Container,
1011
cfdm.ZarrArray,
1112
):

cf/read_write/read.py

Lines changed: 13 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -556,7 +556,8 @@ def _finalise(self):
556556
UM = "UM" in self.unique_dataset_categories
557557

558558
# ----------------------------------------------------------------
559-
# Select matching fields from non-UM files (before aggregation)
559+
# Select matching constructs from non-UM files (before
560+
# aggregation)
560561
# ----------------------------------------------------------------
561562
select = self.select
562563
if select and not UM:
@@ -585,8 +586,8 @@ def _finalise(self):
585586
del f._custom["standard_name"]
586587

587588
# ----------------------------------------------------------------
588-
# Select matching fields from UM files (post setting of their
589-
# standard names)
589+
# Select matching constructs from UM files (after setting
590+
# their standard names)
590591
# ----------------------------------------------------------------
591592
if select and UM:
592593
self.constructs = self.constructs.select_by_identity(*select)
@@ -679,7 +680,7 @@ def _read(self, dataset):
679680
# ------------------------------------------------------------
680681
super()._read(dataset)
681682

682-
if self.dataset_contents:
683+
if self.dataset_contents is not None:
683684
return
684685

685686
# ------------------------------------------------------------
@@ -694,13 +695,13 @@ def _read(self, dataset):
694695
um_kwargs = {
695696
key: kwargs[key]
696697
for key in (
697-
"verbose",
698698
"height_at_top_of_model",
699699
"squeeze",
700700
"unsqueeze",
701701
"domain",
702-
"file_type",
702+
"dataset_type",
703703
"unpack",
704+
"verbose",
704705
)
705706
}
706707
um_kwargs["set_standard_name"] = False
@@ -725,14 +726,14 @@ def _read(self, dataset):
725726
# Successfully read the dataset
726727
self.unique_dataset_categories.add("UM")
727728

728-
if self.dataset_contents:
729+
if self.dataset_contents is not None:
729730
return
730731

731732
# ------------------------------------------------------------
732733
# Try to read as a GRIB dataset
733-
# ------------------------------------------------------------
734-
# Not yet available! The framework will be:
735734
#
735+
# Not yet available! The framework will be:
736+
# ------------------------------------------------------------
736737
# if dataset_type is None or dataset_type.intersection(
737738
# self.GRIB_dataset_types
738739
# ):
@@ -743,19 +744,19 @@ def _read(self, dataset):
743744
# <ADD SOME CODE HERE>
744745
# }
745746
#
746-
# self.um_read = partial(
747+
# self.grib_read = partial(
747748
# GRIBRead(self.implementation).read, **grib_kwargs
748749
# )
749750
#
750751
# try:
751752
# # Try to read the dataset
752-
# self.dataset_contents = self.grid_read(dataset)
753+
# self.dataset_contents = self.grib_read(dataset)
753754
# except DatasetTypeError as error:
754755
# if dataset_type is None:
755756
# self.dataset_format_errors.append(error)
756757
# else:
757758
# # Successfully read the dataset
758759
# self.unique_dataset_categories.add("GRIB")
759760
#
760-
# if self.dataset_contents:
761+
# if self.dataset_contents is not None:
761762
# return

cf/read_write/um/umread.py

Lines changed: 7 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -3407,7 +3407,7 @@ def read(
34073407
squeeze=False,
34083408
unsqueeze=False,
34093409
domain=False,
3410-
file_type=None,
3410+
dataset_type=None,
34113411
ignore_unknown_type=False,
34123412
unpack=True,
34133413
):
@@ -3555,14 +3555,14 @@ def read(
35553555
byte_ordering = None
35563556

35573557
# ------------------------------------------------------------
3558-
# Parse the 'file_type' keyword parameter
3558+
# Parse the 'dataset_type' keyword parameter
35593559
# ------------------------------------------------------------
3560-
if file_type is not None:
3561-
if isinstance(file_type, str):
3562-
file_type = (file_type,)
3560+
if dataset_type is not None:
3561+
if isinstance(dataset_type, str):
3562+
dataset_type = (dataset_type,)
35633563

3564-
file_type = set(file_type)
3565-
if not file_type.intersection(("UM",)):
3564+
dataset_type = set(dataset_type)
3565+
if not dataset_type.intersection(("UM",)):
35663566
			# Return now if there are no matching dataset types
35673567
			return []
35683568

0 commit comments

Comments (0)