Skip to content

Commit bd5e508

Browse files
committed
PP unpack
1 parent 242c57c commit bd5e508

File tree

2 files changed

+31
-0
lines changed

2 files changed

+31
-0
lines changed

cf/read_write/read.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -763,6 +763,7 @@ def __new__(
763763
unsqueeze=unsqueeze,
764764
domain=domain,
765765
file_type=file_type,
766+
unpack=unpack,
766767
)
767768
except DatasetTypeError as error:
768769
if file_type is None:

cf/read_write/um/umread.py

Lines changed: 30 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -491,6 +491,7 @@ def __init__(
491491
info=False,
492492
squeeze=False,
493493
unsqueeze=False,
494+
unpack=True,
494495
**kwargs,
495496
):
496497
"""**Initialisation**
@@ -559,6 +560,17 @@ def __init__(
559560
560561
.. versionadded:: NEXTVERSION
561562
563+
unpack: `bool`, optional
564+
If True, the default, then unpack arrays by convention
565+
when the data is read from disk.
566+
567+
Unpacking is determined by netCDF conventions for the
568+
following variable attributes ``add_offset`` and
569+
``scale_factor``, as applied to lookup header entries
570+
BDATUM and BMKS respectively.
571+
572+
.. versionadded:: NEXTVERSION
573+
562574
kwargs: *optional*
563575
Keyword arguments providing extra CF properties for each
564576
return field construct.
@@ -579,6 +591,7 @@ def __init__(
579591
self.height_at_top_of_model = height_at_top_of_model
580592
self.byte_ordering = byte_ordering
581593
self.word_size = word_size
594+
self.unpack = unpack
582595

583596
self.atol = cf_atol()
584597

@@ -2025,6 +2038,7 @@ def create_data(self):
20252038
klass_name = UMArray().__class__.__name__
20262039

20272040
fmt = self.fmt
2041+
unpack = self.unpack
20282042

20292043
if len(recs) == 1:
20302044
# --------------------------------------------------------
@@ -2050,6 +2064,7 @@ def create_data(self):
20502064
word_size=self.word_size,
20512065
byte_ordering=self.byte_ordering,
20522066
attributes=attributes,
2067+
unpack=unpack,
20532068
)
20542069

20552070
key = f"{klass_name}-{tokenize(subarray)}"
@@ -2103,6 +2118,7 @@ def create_data(self):
21032118
word_size=word_size,
21042119
byte_ordering=byte_ordering,
21052120
attributes=attributes,
2121+
unpack=unpack,
21062122
)
21072123

21082124
key = f"{klass_name}-{tokenize(subarray)}"
@@ -2153,6 +2169,7 @@ def create_data(self):
21532169
word_size=word_size,
21542170
byte_ordering=byte_ordering,
21552171
attributes=attributes,
2172+
unpack=unpack,
21562173
)
21572174

21582175
key = f"{klass_name}-{tokenize(subarray)}"
@@ -3392,6 +3409,7 @@ def read(
33923409
domain=False,
33933410
file_type=None,
33943411
ignore_unknown_type=False,
3412+
unpack=True,
33953413
):
33963414
"""Read fields from a PP file or UM fields file.
33973415
@@ -3474,6 +3492,17 @@ def read(
34743492
34753493
.. versionadded:: NEXTVERSION
34763494
3495+
unpack: `bool`, optional
3496+
If True, the default, then unpack arrays by convention
3497+
when the data is read from disk.
3498+
3499+
Unpacking is determined by netCDF conventions for the
3500+
following variable attributes ``add_offset`` and
3501+
``scale_factor``, as applied to lookup header entries
3502+
BDATUM and BMKS respectively.
3503+
3504+
.. versionadded:: NEXTVERSION
3505+
34773506
:Returns:
34783507
34793508
`list`
@@ -3555,6 +3584,7 @@ def read(
35553584
implementation=self.implementation,
35563585
select=select,
35573586
info=info,
3587+
unpack=unpack,
35583588
)
35593589
for var in f.vars
35603590
]

0 commit comments

Comments
 (0)