Skip to content

Commit d48e12a — "python3.9"

Browse files
Committed (1 parent: f7dc7d6; commit: d48e12a)

File tree

24 files changed

+96
-107
lines changed

24 files changed

+96
-107
lines changed

.pre-commit-config.yaml

Lines changed: 9 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -3,40 +3,40 @@ repos:
33
rev: v4.6.0
44
hooks:
55
- id: trailing-whitespace
6-
language_version: python3.11
6+
language_version: python3.9
77
- id: end-of-file-fixer
88
exclude: ^docs/
9-
language_version: python3.11
9+
language_version: python3.9
1010

1111
- repo: https://github.com/adrienverge/yamllint.git
1212
rev: v1.35.1
1313
hooks:
1414
- id: yamllint
1515
args: [-c=.yamllint.yml]
16-
language_version: python3.11
16+
language_version: python3.9
1717

1818
- repo: https://github.com/asottile/pyupgrade
1919
rev: v3.16.0
2020
hooks:
2121
- id: pyupgrade
22-
args: [--py3-plus]
23-
language_version: python3.11
22+
args: [--py39-plus]
23+
language_version: python3.9
2424

2525
- repo: https://github.com/google/yapf
2626
rev: v0.43.0
2727
hooks:
2828
- id: yapf
2929
name: Format code
3030
additional_dependencies: [toml]
31-
language_version: python3.11
31+
language_version: python3.9
3232

3333
- repo: https://github.com/pycqa/isort
3434
rev: 5.13.2
3535
hooks:
3636
- id: isort
3737
name: Sort imports
3838
# args: [--line-width, "120", --profile, black]
39-
language_version: python3.11
39+
language_version: python3.9
4040

4141
- repo: https://github.com/PyCQA/docformatter
4242
rev: eb1df34
@@ -45,7 +45,7 @@ repos:
4545
name: Format docstring
4646
additional_dependencies: [tomli]
4747
args: [--config, pyproject.toml]
48-
language_version: python3.11
48+
language_version: python3.9
4949

5050
- repo: https://github.com/executablebooks/mdformat
5151
rev: 0.7.17
@@ -55,4 +55,4 @@ repos:
5555
additional_dependencies:
5656
- mdformat-gfm
5757
- mdformat-tables
58-
language_version: python3.11
58+
language_version: python3.9

dance/atlas/sc_similarity/anndata_similarity.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -458,9 +458,9 @@ def get_targets(dataset_truth: str):
458458
sim_targets.append((sum(x for x, y in sim_targets), sum(y for x, y in sim_targets)))
459459
return sim_targets
460460

461-
def compute_similarity(self, random_state: int, methods: List[str] = [
461+
def compute_similarity(self, random_state: int, methods: list[str] = [
462462
'cosine', 'pearson', 'jaccard', 'js_distance', 'otdd', 'common_genes_num', "ground_truth", "metadata_sim"
463-
], origin=False) -> Dict[str, float]:
463+
], origin=False) -> dict[str, float]:
464464
"""Compute multiple similarity metrics between datasets.
465465
466466
Parameters
@@ -526,11 +526,11 @@ def compute_similarity(self, random_state: int, methods: List[str] = [
526526
return results
527527

528528
def get_similarity_matrix_A2B(
529-
self, methods: List[str] = [
529+
self, methods: list[str] = [
530530
"wasserstein", "Hausdorff", "chamfer", "energy", "sinkhorn2", "bures", "spectral", "common_genes_num",
531531
"ground_truth", "metadata_sim", "mmd"
532532
]
533-
) -> Dict[str, float]:
533+
) -> dict[str, float]:
534534
"""Same as compute_similarity, keeping method name consistency."""
535535
cumulative_results = {method: 0.0 for method in methods}
536536

dance/datasets/singlemodality.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -518,7 +518,7 @@ def is_complete(self):
518518

519519
for i in check:
520520
if not osp.exists(i):
521-
logger.info("file {} doesn't exist".format(i))
521+
logger.info(f"file {i} doesn't exist")
522522
return False
523523
return True
524524

dance/datasets/spatial.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -51,7 +51,7 @@ def __init__(self, root=".", full_download=False, data_id="151673", data_dir="da
5151
super().__init__(root, full_download)
5252

5353
self.data_id = data_id
54-
self.data_dir = data_dir + "/{}".format(data_id)
54+
self.data_dir = data_dir + f"/{data_id}"
5555
self.sample_file = sample_file
5656

5757
def download_all(self):

dance/modules/multi_modality/joint_embedding/dcca.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -102,7 +102,7 @@ def build_multi_layers(layers, use_batch_norm=True, dropout_rate=0.1):
102102
if dropout_rate > 0:
103103
fc_layers = nn.Sequential(
104104
collections.OrderedDict([(
105-
"Layer {}".format(i),
105+
f"Layer {i}",
106106
nn.Sequential(
107107
nn.Linear(n_in, n_out),
108108
nn.BatchNorm1d(n_out, momentum=0.01, eps=0.001),
@@ -113,7 +113,7 @@ def build_multi_layers(layers, use_batch_norm=True, dropout_rate=0.1):
113113
else:
114114
fc_layers = nn.Sequential(
115115
collections.OrderedDict([(
116-
"Layer {}".format(i),
116+
f"Layer {i}",
117117
nn.Sequential(
118118
nn.Linear(n_in, n_out),
119119
nn.BatchNorm1d(n_out, momentum=0.01, eps=0.001),

dance/modules/multi_modality/joint_embedding/scmvae.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -129,7 +129,7 @@ def build_multi_layers(layers, use_batch_norm=True, dropout_rate=0.1):
129129
if dropout_rate > 0:
130130
fc_layers = nn.Sequential(
131131
collections.OrderedDict([(
132-
"Layer {}".format(i),
132+
f"Layer {i}",
133133
nn.Sequential(
134134
nn.Linear(n_in, n_out),
135135
nn.BatchNorm1d(n_out, momentum=0.01, eps=0.001),
@@ -141,7 +141,7 @@ def build_multi_layers(layers, use_batch_norm=True, dropout_rate=0.1):
141141
else:
142142
fc_layers = nn.Sequential(
143143
collections.OrderedDict([(
144-
"Layer {}".format(i),
144+
f"Layer {i}",
145145
nn.Sequential(
146146
nn.Linear(n_in, n_out),
147147
nn.BatchNorm1d(n_out, momentum=0.01, eps=0.001),

dance/modules/multi_modality/match_modality/cmae.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -85,7 +85,7 @@ def calc_gen_loss_half(self, input_fake):
8585
all1 = Variable(torch.ones_like(out0.data).cuda(), requires_grad=False)
8686
loss += torch.mean(F.binary_cross_entropy(F.sigmoid(out0), all1))
8787
else:
88-
assert 0, "Unsupported GAN type: {}".format(self.gan_type)
88+
assert 0, f"Unsupported GAN type: {self.gan_type}"
8989
return loss
9090

9191

@@ -221,7 +221,7 @@ def init_fun(m):
221221
elif init_type == 'default':
222222
pass
223223
else:
224-
assert 0, "Unsupported initialization: {}".format(init_type)
224+
assert 0, f"Unsupported initialization: {init_type}"
225225
if hasattr(m, 'bias') and m.bias is not None:
226226
init.constant_(m.bias.data, 0.0)
227227

dance/modules/multi_modality/match_modality/scmm.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -549,15 +549,15 @@ def fit(self, x_train, y_train, val_ratio=0.15):
549549
optimizer.step()
550550
b_loss += loss.item()
551551
if self.params.print_freq > 0 and i % self.params.print_freq == 0:
552-
print("iteration {:04d}: loss: {:6.3f}".format(i, loss.item() / self.params.batch_size))
552+
print(f"iteration {i:04d}: loss: {loss.item() / self.params.batch_size:6.3f}")
553553
tr.append(b_loss / len(train_loader.dataset))
554-
print('====> Epoch: {:03d} Train loss: {:.4f}'.format(epoch, tr[-1]))
554+
print(f'====> Epoch: {epoch:03d} Train loss: {tr[-1]:.4f}')
555555

556556
if torch.isnan(torch.tensor([b_loss])):
557557
break
558558

559559
vals.append(self.score(train_mod1[val_idx], train_mod2[val_idx], metric='loss'))
560-
print('====> Valid loss: {:.4f}'.format(vals[-1]))
560+
print(f'====> Valid loss: {vals[-1]:.4f}')
561561

562562
if vals[-1] == min(vals):
563563
if not os.path.exists('models'):

dance/modules/multi_modality/predict_modality/babel.py

Lines changed: 7 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -249,7 +249,7 @@ class ChromEncoder(nn.Module):
249249
processing everything to be the same dimensionality, concatenate to form a single
250250
latent dimension."""
251251

252-
def __init__(self, num_inputs: List[int], latent_dim: int = 32, activation=nn.PReLU):
252+
def __init__(self, num_inputs: list[int], latent_dim: int = 32, activation=nn.PReLU):
253253
super().__init__()
254254
self.num_inputs = num_inputs
255255
self.act = activation
@@ -290,7 +290,7 @@ class ChromDecoder(nn.Module):
290290

291291
def __init__(
292292
self,
293-
num_outputs: List[int], # Per-chromosome list of output sizes
293+
num_outputs: list[int], # Per-chromosome list of output sizes
294294
latent_dim: int = 32,
295295
activation=nn.PReLU,
296296
final_activations=[Exp(), ClippedSoftplus()],
@@ -513,7 +513,7 @@ def forward_single(self, x, size_factors=None, in_domain: int = 1, out_domain: i
513513
decoded = decoder(encoded)
514514
return self._combine_output_and_encoded(decoded, encoded, num_non_latent_out)
515515

516-
def forward(self, x, size_factors=None, mode: Union[None, Tuple[int, int]] = None):
516+
def forward(self, x, size_factors=None, mode: Union[None, tuple[int, int]] = None):
517517
if self.flat_mode:
518518
x = self.split_catted_input(x)
519519
assert isinstance(x, (tuple, list))
@@ -552,11 +552,11 @@ def __init__(
552552
input_dim1: int,
553553
input_dim2: int,
554554
hidden_dim: int = 16,
555-
final_activations1: Union[Callable, List[Callable]] = [
555+
final_activations1: Union[Callable, list[Callable]] = [
556556
Exp(),
557557
ClippedSoftplus(),
558558
],
559-
final_activations2: Union[Callable, List[Callable]] = nn.Sigmoid(),
559+
final_activations2: Union[Callable, list[Callable]] = nn.Sigmoid(),
560560
flat_mode: bool = True, # Controls if we have to re-split inputs
561561
seed: int = 182822,
562562
):
@@ -633,7 +633,7 @@ def forward_single(self, x, size_factors=None, in_domain: int = 1, out_domain: i
633633
assert isinstance(retval[0], (torch.TensorType, torch.Tensor))
634634
return retval
635635

636-
def forward(self, x, size_factors=None, mode: Union[None, Tuple[int, int]] = None):
636+
def forward(self, x, size_factors=None, mode: Union[None, tuple[int, int]] = None):
637637
if self.flat_mode:
638638
x = self.split_catted_input(x)
639639
assert isinstance(x, (tuple, list))
@@ -662,7 +662,7 @@ class AssymSplicedAutoEncoder(SplicedAutoEncoder):
662662
def __init__(
663663
self,
664664
input_dim1: int,
665-
input_dim2: List[int],
665+
input_dim2: list[int],
666666
hidden_dim: int = 16,
667667
final_activations1: list = [Exp(), ClippedSoftplus()],
668668
final_activations2=nn.Sigmoid(),

dance/modules/multi_modality/predict_modality/cmae.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -84,7 +84,7 @@ def calc_gen_loss_half(self, input_fake):
8484
all1 = Variable(torch.ones_like(out0.data).cuda(), requires_grad=False)
8585
loss += torch.mean(F.binary_cross_entropy(F.sigmoid(out0), all1))
8686
else:
87-
assert 0, "Unsupported GAN type: {}".format(self.gan_type)
87+
assert 0, f"Unsupported GAN type: {self.gan_type}"
8888
return loss
8989

9090

@@ -219,7 +219,7 @@ def init_fun(m):
219219
elif init_type == 'default':
220220
pass
221221
else:
222-
assert 0, "Unsupported initialization: {}".format(init_type)
222+
assert 0, f"Unsupported initialization: {init_type}"
223223
if hasattr(m, 'bias') and m.bias is not None:
224224
init.constant_(m.bias.data, 0.0)
225225

0 commit comments

Comments (0)