@@ -75,7 +75,7 @@ def count_call_alleles(
     from .aggregation_numba_fns import count_alleles

     variables.validate(ds, {call_genotype: variables.call_genotype_spec})
-    n_alleles = ds.dims["alleles"]
+    n_alleles = ds.sizes["alleles"]
     G = da.asarray(ds[call_genotype])
     shape = (G.chunks[0], G.chunks[1], n_alleles)
     # use numpy array to avoid dask task dependencies between chunks
@@ -170,8 +170,8 @@ def count_variant_alleles(
     from .aggregation_numba_fns import count_alleles

     variables.validate(ds, {call_genotype: variables.call_genotype_spec})
-    n_alleles = ds.dims["alleles"]
-    n_variant = ds.dims["variants"]
+    n_alleles = ds.sizes["alleles"]
+    n_variant = ds.sizes["variants"]
     G = da.asarray(ds[call_genotype]).reshape((n_variant, -1))
     shape = (G.chunks[0], n_alleles)
     # use uint64 dummy array to return uin64 counts array
@@ -227,7 +227,7 @@ def count_cohort_alleles(
     >>> ds = sg.simulate_genotype_call_dataset(n_variant=5, n_sample=4)

     >>> # Divide samples into two cohorts
-    >>> ds["sample_cohort"] = xr.DataArray(np.repeat([0, 1], ds.dims["samples"] // 2), dims="samples")
+    >>> ds["sample_cohort"] = xr.DataArray(np.repeat([0, 1], ds.sizes["samples"] // 2), dims="samples")
     >>> sg.display_genotypes(ds) # doctest: +NORMALIZE_WHITESPACE
     samples    S0   S1   S2   S3
     variants
@@ -364,8 +364,8 @@ def count_variant_genotypes(
     mixed_ploidy = ds[call_genotype].attrs.get("mixed_ploidy", False)
     if mixed_ploidy:
         raise ValueError("Mixed-ploidy dataset")
-    ploidy = ds.dims["ploidy"]
-    n_alleles = ds.dims["alleles"]
+    ploidy = ds.sizes["ploidy"]
+    n_alleles = ds.sizes["alleles"]
     n_genotypes = _comb_with_replacement(n_alleles, ploidy)
     G = da.asarray(ds[call_genotype].data)
     N = np.empty(n_genotypes, np.uint64)
@@ -432,8 +432,8 @@ def genotype_coords(
     """
     from .conversion_numba_fns import _comb_with_replacement, _index_as_genotype

-    n_alleles = ds.dims["alleles"]
-    ploidy = ds.dims["ploidy"]
+    n_alleles = ds.sizes["alleles"]
+    ploidy = ds.sizes["ploidy"]
     n_genotypes = _comb_with_replacement(n_alleles, ploidy)
     max_chars = len(str(n_alleles - 1))
     # dummy variable for ploidy dim also specifies output dtype
@@ -553,7 +553,7 @@ def cohort_allele_frequencies(
     >>> ds = sg.simulate_genotype_call_dataset(n_variant=5, n_sample=4)

     >>> # Divide samples into two cohorts
-    >>> ds["sample_cohort"] = xr.DataArray(np.repeat([0, 1], ds.dims["samples"] // 2), dims="samples")
+    >>> ds["sample_cohort"] = xr.DataArray(np.repeat([0, 1], ds.sizes["samples"] // 2), dims="samples")
     >>> sg.display_genotypes(ds) # doctest: +NORMALIZE_WHITESPACE
     samples    S0   S1   S2   S3
     variants
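Note: every hunk above makes the same mechanical swap from ds.dims[...] to ds.sizes[...], presumably because recent xarray releases deprecate mapping-style access on Dataset.dims (which is slated to return only the set of dimension names), while Dataset.sizes remains the mapping from dimension name to length. The snippet below is a minimal illustrative sketch of that difference, not part of this change; the toy dataset and variable names are assumptions.

# Illustrative sketch (not part of this PR): why length lookups move from
# Dataset.dims to Dataset.sizes on newer xarray versions.
import numpy as np
import xarray as xr

# Toy dataset with the same dimension names used in the diff above.
ds = xr.Dataset(
    {"call_genotype": (("variants", "samples", "ploidy"), np.zeros((5, 4, 2), dtype="int8"))}
)

# Dataset.sizes is a mapping from dimension name to length.
n_samples = ds.sizes["samples"]  # 4

# Dataset.dims historically worked the same way, but mapping-style access
# emits a FutureWarning on recent xarray releases because Dataset.dims is
# planned to return just the set of dimension names.
print(n_samples, dict(ds.sizes))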