
Commit ec88371

fix flake8
1 parent 0138e3a commit ec88371


4 files changed, +17 -20 lines changed


pylops_mpi/Distributed.py

Lines changed: 5 additions & 5 deletions
@@ -22,8 +22,8 @@ class DistributedMixIn:
     MPI installation is available, the latter with CuPy arrays when a CUDA-Aware
     MPI installation is not available).
     """
-    def _allreduce(self, base_comm, base_comm_nccl,
-                   send_buf, recv_buf=None, op: MPI.Op = MPI.SUM,
+    def _allreduce(self, base_comm, base_comm_nccl,
+                   send_buf, recv_buf=None, op: MPI.Op = MPI.SUM,
                    engine="numpy"):
         """Allreduce operation
         """
@@ -33,7 +33,7 @@ def _allreduce(self, base_comm, base_comm_nccl,
         return mpi_allreduce(base_comm, send_buf,
                              recv_buf, engine, op)
 
-    def _allreduce_subcomm(self, sub_comm, base_comm_nccl,
+    def _allreduce_subcomm(self, sub_comm, base_comm_nccl,
                            send_buf, recv_buf=None, op: MPI.Op = MPI.SUM,
                            engine="numpy"):
         """Allreduce operation with subcommunicator
@@ -44,7 +44,7 @@ def _allreduce_subcomm(self, sub_comm, base_comm_nccl,
         return mpi_allreduce(sub_comm, send_buf,
                              recv_buf, engine, op)
 
-    def _allgather(self, base_comm, base_comm_nccl,
+    def _allgather(self, base_comm, base_comm_nccl,
                    send_buf, recv_buf=None,
                    engine="numpy"):
         """Allgather operation
@@ -85,7 +85,7 @@ def _bcast(self, local_array, index, value):
         # self.local_array[index] = self.base_comm.bcast(value)
         mpi_bcast(self.base_comm, self.rank, self.local_array, index, value,
                   engine=self.engine)
-
+
     def _send(self, send_buf, dest, count=None, tag=0):
         """Send operation
         """

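The changed lines in this file are whitespace-only: each -/+ pair differs only in
the indentation of a continuation line or in trailing whitespace, which is
presumably what flake8 flagged (pycodestyle codes such as E127/E128 for
continuation-line alignment and W291/W293 for trailing whitespace). A minimal
illustration of the continuation-line rule, with a hypothetical function that is
not part of pylops-mpi:

    # Hypothetical example: E128 fires when a continuation line is
    # under-indented relative to the opening delimiter.
    def reduce_bad(comm, send_buf,
            recv_buf=None):  # E128 continuation line under-indented for visual indent
        ...

    # Aligning the continuation line with the opening parenthesis clears the check.
    def reduce_ok(comm, send_buf,
                  recv_buf=None):
        ...
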
pylops_mpi/DistributedArray.py

Lines changed: 8 additions & 8 deletions
@@ -341,7 +341,7 @@ def local_shapes(self):
             return self._nccl_local_shapes(False)
         else:
             return self._allgather(self.base_comm,
-                                   self.base_comm_nccl,
+                                   self.base_comm_nccl,
                                    self.local_shape)
 
     @property
@@ -383,7 +383,7 @@ def asarray(self, masked: bool = False):
             final_array = self._allgather_subcomm(self.local_array)
         else:
             final_array = self._allgather(self.base_comm,
-                                          self.base_comm_nccl,
+                                          self.base_comm_nccl,
                                           self.local_array,
                                           engine=self.engine)
         return np.concatenate(final_array, axis=self.axis)
@@ -484,7 +484,7 @@ def _nccl_local_shapes(self, masked: bool):
             all_tuples = self._allgather_subcomm(self.local_shape).get()
         else:
             all_tuples = self._allgather(self.base_comm,
-                                         self.base_comm_nccl,
+                                         self.base_comm_nccl,
                                          self.local_shape).get()
         # NCCL returns the flat array that packs every tuple as 1-dimensional array
         # unpack each tuple from each rank
@@ -625,12 +625,12 @@ def _compute_vector_norm(self, local_array: NDArray,
                 # CuPy + non-CUDA-aware MPI: This will call non-buffered communication
                 # which return a list of object - must be copied back to a GPU memory.
                 recv_buf = self._allreduce_subcomm(self.sub_comm, self.base_comm_nccl,
-                                                   send_buf.get(), recv_buf.get(),
+                                                   send_buf.get(), recv_buf.get(),
                                                    op=MPI.MAX, engine=self.engine)
                 recv_buf = ncp.asarray(ncp.squeeze(recv_buf, axis=axis))
             else:
                 recv_buf = self._allreduce_subcomm(self.sub_comm, self.base_comm_nccl,
-                                                   send_buf, recv_buf, op=MPI.MAX,
+                                                   send_buf, recv_buf, op=MPI.MAX,
                                                    engine=self.engine)
                 # TODO (tharitt): In current implementation, there seems to be a semantic difference between Buffered MPI and NCCL
                 # the (1, size) is collapsed to (size, ) with buffered MPI while NCCL retains it.
@@ -643,18 +643,18 @@ def _compute_vector_norm(self, local_array: NDArray,
             send_buf = ncp.min(ncp.abs(local_array), axis=axis).astype(ncp.float64)
             if self.engine == "cupy" and self.base_comm_nccl is None and not deps.cuda_aware_mpi_enabled:
                 recv_buf = self._allreduce_subcomm(self.sub_comm, self.base_comm_nccl,
-                                                   send_buf.get(), recv_buf.get(),
+                                                   send_buf.get(), recv_buf.get(),
                                                    op=MPI.MIN, engine=self.engine)
                 recv_buf = ncp.asarray(ncp.squeeze(recv_buf, axis=axis))
             else:
                 recv_buf = self._allreduce_subcomm(self.sub_comm, self.base_comm_nccl,
-                                                   send_buf, recv_buf,
+                                                   send_buf, recv_buf,
                                                    op=MPI.MIN, engine=self.engine)
                 if self.base_comm_nccl:
                     recv_buf = ncp.asarray(ncp.squeeze(recv_buf, axis=axis))
         else:
             recv_buf = self._allreduce_subcomm(self.sub_comm, self.base_comm_nccl,
-                                               ncp.sum(ncp.abs(ncp.float_power(local_array, ord)), axis=axis),
+                                               ncp.sum(ncp.abs(ncp.float_power(local_array, ord)), axis=axis),
                                                engine=self.engine)
             recv_buf = ncp.power(recv_buf, 1.0 / ord)
         return recv_buf
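
The _compute_vector_norm hunks above reduce per-rank partial results (maxima,
minima, or sums of powers) over a subcommunicator. A rough sketch of the same
pattern written directly against mpi4py, assuming a NumPy vector split across
ranks (the names here are illustrative, not the pylops-mpi API):

    import numpy as np
    from mpi4py import MPI

    comm = MPI.COMM_WORLD
    local_array = np.random.rand(10)  # this rank's slice of the global vector

    # infinity norm: reduce the per-rank maxima with MPI.MAX
    local_max = float(np.max(np.abs(local_array)))
    norm_inf = comm.allreduce(local_max, op=MPI.MAX)

    # generic p-norm: sum |x|**p over all ranks, then take the p-th root
    p = 2
    local_sum = float(np.sum(np.abs(local_array) ** p))
    norm_p = comm.allreduce(local_sum, op=MPI.SUM) ** (1.0 / p)

In pylops-mpi the reduction goes through _allreduce_subcomm so that, depending
on the backend, it can be routed through MPI, CUDA-aware MPI, or NCCL.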

pylops_mpi/basicoperators/VStack.py

Lines changed: 4 additions & 4 deletions
@@ -135,17 +135,17 @@ def _matvec(self, x: DistributedArray) -> DistributedArray:
     def _rmatvec(self, x: DistributedArray) -> DistributedArray:
         ncp = get_module(x.engine)
         y = DistributedArray(global_shape=self.shape[1],
-                             base_comm=x.base_comm,
-                             base_comm_nccl=x.base_comm_nccl,
+                             base_comm=x.base_comm,
+                             base_comm_nccl=x.base_comm_nccl,
                              partition=Partition.BROADCAST,
                              engine=x.engine,
                              dtype=self.dtype)
         y1 = []
         for iop, oper in enumerate(self.ops):
             y1.append(oper.rmatvec(x.local_array[self.nnops[iop]: self.nnops[iop + 1]]))
         y1 = ncp.sum(ncp.vstack(y1), axis=0)
-        y[:] = self._allreduce(x.base_comm, x.base_comm_nccl,
-                               y1, op=MPI.SUM, engine=x.engine)
+        y[:] = self._allreduce(x.base_comm, x.base_comm_nccl,
+                               y1, op=MPI.SUM, engine=x.engine)
         return y
 
 
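For context, _rmatvec above forms the adjoint of the vertical stack: each rank
applies rmatvec of its local operators, sums the results, and an MPI.SUM
allreduce accumulates the contributions from all ranks into the broadcast
output. A sketch of that reduction step with plain NumPy and mpi4py
(hypothetical block sizes, not the actual operator classes):

    import numpy as np
    from mpi4py import MPI

    comm = MPI.COMM_WORLD

    # each rank owns one block A_i of the stacked operator and the matching
    # slice x_i of the data; the adjoint of the stack is y = sum_i A_i^H x_i
    A_i = np.arange(15, dtype=np.float64).reshape(5, 3)
    x_i = np.ones(5)

    y_local = A_i.T @ x_i                   # this rank's contribution A_i^H x_i
    y = np.empty_like(y_local)
    comm.Allreduce(y_local, y, op=MPI.SUM)  # sum contributions across ranks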

pylops_mpi/utils/_common.py

Lines changed: 0 additions & 3 deletions
@@ -3,12 +3,9 @@
     "_unroll_allgather_recv"
 ]
 
-from typing import Optional
 
 import numpy as np
-from mpi4py import MPI
 from pylops.utils.backend import get_module
-from pylops_mpi.utils import deps
 
 
 # TODO: return type annotation for both cupy and numpy
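
The three deleted lines are imports that are no longer used in this module;
flake8 (via pyflakes) reports these as F401, "imported but unused". A minimal
hypothetical module that would trigger the same warning, assuming a default
flake8 configuration:

    # flake8 reports: F401 'mpi4py.MPI' imported but unused
    from mpi4py import MPI

    def double(x):
        return 2 * x

Deleting the unused import, as the hunk above does, clears the warning.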
