@@ -491,6 +491,7 @@ def __init__(
         info=False,
         squeeze=False,
         unsqueeze=False,
+        unpack=True,
         **kwargs,
     ):
         """**Initialisation**
@@ -559,6 +560,17 @@ def __init__(

                 .. versionadded:: NEXTVERSION

+            unpack: `bool`, optional
+                If True, the default, then unpack arrays by convention
+                when the data is read from disk.
+
+                Unpacking is determined by the netCDF conventions for
+                the variable attributes ``add_offset`` and
+                ``scale_factor``, as applied to the lookup header
+                entries BDATUM and BMKS respectively.
+
+                .. versionadded:: NEXTVERSION
+
             kwargs: *optional*
                 Keyword arguments providing extra CF properties for each
                 return field construct.
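For readers unfamiliar with the convention referenced here, below is a minimal illustrative sketch (not code from this change) of the netCDF-style unpacking that the new docstring describes, with BMKS standing in for ``scale_factor`` and BDATUM for ``add_offset``. The function and variable names are hypothetical:

```python
import numpy as np

def unpack_by_convention(raw, bdatum=0.0, bmks=1.0):
    """Illustrative only: netCDF-style unpacking of packed PP/UM data.

    BMKS plays the role of ``scale_factor`` and BDATUM the role of
    ``add_offset``, as described in the docstring above.
    """
    # unpacked = scale_factor * packed + add_offset
    return np.asarray(raw) * bmks + bdatum

# e.g. values stored as scaled integers with BMKS=0.01 and BDATUM=273.15
print(unpack_by_convention([0, 150, 300], bdatum=273.15, bmks=0.01))
# [273.15 274.65 276.15]
```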
@@ -579,6 +591,7 @@ def __init__(
         self.height_at_top_of_model = height_at_top_of_model
         self.byte_ordering = byte_ordering
         self.word_size = word_size
+        self.unpack = unpack

         self.atol = cf_atol()

@@ -2025,6 +2038,7 @@ def create_data(self):
         klass_name = UMArray().__class__.__name__

         fmt = self.fmt
+        unpack = self.unpack

         if len(recs) == 1:
             # --------------------------------------------------------
@@ -2050,6 +2064,7 @@ def create_data(self):
                 word_size=self.word_size,
                 byte_ordering=self.byte_ordering,
                 attributes=attributes,
+                unpack=unpack,
             )

             key = f"{klass_name}-{tokenize(subarray)}"
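The flag is simply forwarded to each subarray here, which suggests that the unpacking itself is deferred until the data are actually read from disk. A rough, hypothetical sketch of that pattern (not the real ``UMArray`` implementation) might look like:

```python
import numpy as np

class LazyPackedArray:
    """Hypothetical file-backed array that unpacks only when accessed."""

    def __init__(self, read_raw, scale_factor=1.0, add_offset=0.0, unpack=True):
        self._read_raw = read_raw          # callable returning the raw on-disk values
        self.scale_factor = scale_factor   # would come from BMKS
        self.add_offset = add_offset       # would come from BDATUM
        self.unpack = unpack

    def __getitem__(self, index):
        data = np.asarray(self._read_raw())[index]
        if self.unpack:
            # Apply the netCDF convention only at read time
            data = data * self.scale_factor + self.add_offset
        return data

# Nothing is unpacked until the array is indexed
arr = LazyPackedArray(lambda: np.arange(4), scale_factor=0.5, add_offset=10.0)
print(arr[:])  # [10.  10.5 11.  11.5]
```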
@@ -2103,6 +2118,7 @@ def create_data(self):
                 word_size=word_size,
                 byte_ordering=byte_ordering,
                 attributes=attributes,
+                unpack=unpack,
             )

             key = f"{klass_name}-{tokenize(subarray)}"
@@ -2153,6 +2169,7 @@ def create_data(self):
                 word_size=word_size,
                 byte_ordering=byte_ordering,
                 attributes=attributes,
+                unpack=unpack,
             )

             key = f"{klass_name}-{tokenize(subarray)}"
@@ -3392,6 +3409,7 @@ def read(
         domain=False,
         file_type=None,
         ignore_unknown_type=False,
+        unpack=True,
     ):
         """Read fields from a PP file or UM fields file.

@@ -3474,6 +3492,17 @@ def read(

                 .. versionadded:: NEXTVERSION

+            unpack: `bool`, optional
+                If True, the default, then unpack arrays by convention
+                when the data is read from disk.
+
+                Unpacking is determined by the netCDF conventions for
+                the variable attributes ``add_offset`` and
+                ``scale_factor``, as applied to the lookup header
+                entries BDATUM and BMKS respectively.
+
+                .. versionadded:: NEXTVERSION
+
         :Returns:

             `list`
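As a usage sketch, assuming the new keyword is also exposed through the public ``cf.read`` interface (not shown in this diff) and using a hypothetical file name:

```python
import cf  # assumes a cf-python build with this change applied

# Unpack by convention (the default): BDATUM/BMKS applied on read
fields = cf.read("data.pp", unpack=True)

# Keep the raw packed values exactly as stored on disk
raw_fields = cf.read("data.pp", unpack=False)
```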
@@ -3555,6 +3584,7 @@ def read(
                 implementation=self.implementation,
                 select=select,
                 info=info,
+                unpack=unpack,
             )
             for var in f.vars
         ]