@@ -511,9 +511,15 @@ def group_keys(self):
         else:
             dir_name = meta_root + self._path
             group_sfx = '.group' + self._metadata_key_suffix
-            for key in sorted(listdir(self._store, dir_name)):
+            # The fact that we call sorted means this can't be a streaming generator.
+            # The keys are already in memory.
+            all_keys = sorted(listdir(self._store, dir_name))
+            for key in all_keys:
                 if key.endswith(group_sfx):
                     key = key[:-len(group_sfx)]
+                    if key in all_keys:
+                        # otherwise we will double count this group
+                        continue
                 path = self._key_prefix + key
                 if path.endswith(".array" + self._metadata_key_suffix):
                     # skip array keys
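
A minimal, self-contained sketch of the case the new 'in all_keys' guard handles (the listing and suffix here are hypothetical, not zarr's API): a v3 store listing can surface the same group both as a '.group.json' metadata document and as a bare prefix, and stripping the suffix would otherwise yield the same name twice.

    # Hypothetical store listing containing both forms of 'foo'.
    listing = ['bar.group.json', 'foo', 'foo.group.json']
    group_sfx = '.group.json'

    all_keys = sorted(listing)
    seen = []
    for key in all_keys:
        if key.endswith(group_sfx):
            key = key[:-len(group_sfx)]
            if key in all_keys:
                # 'foo' is also listed directly; skip the metadata key
                # so the group is counted once.
                continue
        seen.append(key)

    assert seen == ['bar', 'foo']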
@@ -552,24 +558,16 @@ def groups(self):
                         zarr_version=self._version)

         else:
-            dir_name = meta_root + self._path
-            group_sfx = '.group' + self._metadata_key_suffix
-            for key in sorted(listdir(self._store, dir_name)):
-                if key.endswith(group_sfx):
-                    key = key[:-len(group_sfx)]
+            for key in self.group_keys():
                 path = self._key_prefix + key
-                if path.endswith(".array" + self._metadata_key_suffix):
-                    # skip array keys
-                    continue
-                if contains_group(self._store, path, explicit_only=False):
-                    yield key, Group(
-                        self._store,
-                        path=path,
-                        read_only=self._read_only,
-                        chunk_store=self._chunk_store,
-                        cache_attrs=self.attrs.cache,
-                        synchronizer=self._synchronizer,
-                        zarr_version=self._version)
+                yield key, Group(
+                    self._store,
+                    path=path,
+                    read_only=self._read_only,
+                    chunk_store=self._chunk_store,
+                    cache_attrs=self.attrs.cache,
+                    synchronizer=self._synchronizer,
+                    zarr_version=self._version)

     def array_keys(self, recurse=False):
         """Return an iterator over member names for arrays only.