Skip to content

Commit c607283

Browse files
committed
Example formatting
1 parent f72fce6 commit c607283

File tree

1 file changed

+34
-34
lines changed

1 file changed

+34
-34
lines changed

examples/plot_matrixmult.py

Lines changed: 34 additions & 34 deletions
Original file line numberDiff line numberDiff line change
@@ -15,15 +15,14 @@
1515

1616
np.random.seed(42)
1717

18-
comm = MPI.COMM_WORLD
19-
rank = comm.Get_rank()
20-
nProcs = comm.Get_size()
18+
comm = MPI.COMM_WORLD
19+
rank = comm.Get_rank()
20+
n_procs = comm.Get_size()
2121

22+
P_prime = int(math.ceil(math.sqrt(n_procs)))
23+
C = int(math.ceil(n_procs / P_prime))
2224

23-
P_prime = int(math.ceil(math.sqrt(nProcs)))
24-
C = int(math.ceil(nProcs / P_prime))
25-
26-
if P_prime * C < nProcs:
25+
if P_prime * C < n_procs:
2726
print("No. of procs has to be a square number")
2827
exit(-1)
2928

@@ -39,45 +38,47 @@
3938
my_layer = rank // P_prime
4039

4140
# sub‐communicators
42-
layer_comm = comm.Split(color=my_layer, key=my_group) # all procs in same layer
43-
group_comm = comm.Split(color=my_group, key=my_layer) # all procs in same group
41+
layer_comm = comm.Split(color=my_layer, key=my_group) # all procs in same layer
42+
group_comm = comm.Split(color=my_group, key=my_layer) # all procs in same group
4443

4544
# Each rank will end up with:
4645
# A_p: shape (my_own_rows, K)
4746
# B_p: shape (K, my_own_cols)
4847
# where
49-
row_start = my_group * blk_rows
50-
row_end = min(M, row_start + blk_rows)
48+
row_start = my_group * blk_rows
49+
row_end = min(M, row_start + blk_rows)
5150
my_own_rows = row_end - row_start
5251

53-
col_start = my_group * blk_cols # note: same my_group index on cols
54-
col_end = min(N, col_start + blk_cols)
52+
col_start = my_group * blk_cols # note: same my_group index on cols
53+
col_end = min(N, col_start + blk_cols)
5554
my_own_cols = col_end - col_start
5655

5756
# ======================= BROADCASTING THE SLICES =======================
5857
if rank == 0:
59-
A = np.arange(M*K, dtype=np.float32).reshape(M, K)
60-
B = np.arange(K*N, dtype=np.float32).reshape(K, N)
61-
for dest in range(nProcs):
58+
A = np.arange(M * K, dtype=np.float32).reshape(M, K)
59+
B = np.arange(K * N, dtype=np.float32).reshape(K, N)
60+
for dest in range(n_procs):
6261
pg = dest % P_prime
63-
rs = pg*blk_rows; re = min(M, rs+blk_rows)
64-
cs = pg*blk_cols; ce = min(N, cs+blk_cols)
65-
a_block , b_block = A[rs:re, :].copy(), B[:, cs:ce].copy()
62+
rs = pg * blk_rows;
63+
re = min(M, rs + blk_rows)
64+
cs = pg * blk_cols;
65+
ce = min(N, cs + blk_cols)
66+
a_block, b_block = A[rs:re, :].copy(), B[:, cs:ce].copy()
6667
if dest == 0:
6768
A_p, B_p = a_block, b_block
6869
else:
69-
comm.Send(a_block, dest=dest, tag=100+dest)
70-
comm.Send(b_block, dest=dest, tag=200+dest)
70+
comm.Send(a_block, dest=dest, tag=100 + dest)
71+
comm.Send(b_block, dest=dest, tag=200 + dest)
7172
else:
7273
A_p = np.empty((my_own_rows, K), dtype=np.float32)
7374
B_p = np.empty((K, my_own_cols), dtype=np.float32)
74-
comm.Recv(A_p, source=0, tag=100+rank)
75-
comm.Recv(B_p, source=0, tag=200+rank)
75+
comm.Recv(A_p, source=0, tag=100 + rank)
76+
comm.Recv(B_p, source=0, tag=200 + rank)
7677

7778
comm.Barrier()
7879

79-
Aop = MPISUMMAMatrixMult(A_p, N)
80-
col_lens = comm.allgather(my_own_cols)
80+
Aop = MPISUMMAMatrixMult(A_p, N)
81+
col_lens = comm.allgather(my_own_cols)
8182
total_cols = np.add.reduce(col_lens, 0)
8283
x = DistributedArray(global_shape=K * total_cols,
8384
local_shapes=[K * col_len for col_len in col_lens],
@@ -88,27 +89,26 @@
8889
y = Aop @ x
8990

9091
# ======================= VERIFICATION =================-=============
91-
A = np.arange(M*K).reshape(M, K).astype(np.float32)
92-
B = np.arange(K*N).reshape(K, N).astype(np.float32)
92+
A = np.arange(M * K).reshape(M, K).astype(np.float32)
93+
B = np.arange(K * N).reshape(K, N).astype(np.float32)
9394
C_true = A @ B
9495
Z_true = (A.T.dot(C_true.conj())).conj()
9596

96-
97-
col_start = my_layer * blk_cols # note: same my_group index on cols
98-
col_end = min(N, col_start + blk_cols)
97+
col_start = my_layer * blk_cols # note: same my_group index on cols
98+
col_end = min(N, col_start + blk_cols)
9999
my_own_cols = col_end - col_start
100-
expected_y = C_true[:,col_start:col_end].flatten()
100+
expected_y = C_true[:, col_start:col_end].flatten()
101101

102102
if not np.allclose(y.local_array, expected_y, atol=1e-6, rtol=1e-14):
103103
print(f"RANK {rank}: FORWARD VERIFICATION FAILED")
104-
print(f'{rank} local: {y.local_array}, expected: {C_true[:,col_start:col_end]}')
104+
print(f'{rank} local: {y.local_array}, expected: {C_true[:, col_start:col_end]}')
105105
else:
106106
print(f"RANK {rank}: FORWARD VERIFICATION PASSED")
107107

108108
z = Aop.H @ y
109-
expected_z = Z_true[:,col_start:col_end].flatten()
109+
expected_z = Z_true[:, col_start:col_end].flatten()
110110
if not np.allclose(z.local_array, expected_z, atol=1e-6, rtol=1e-14):
111111
print(f"RANK {rank}: ADJOINT VERIFICATION FAILED")
112-
print(f'{rank} local: {z.local_array}, expected: {Z_true[:,col_start:col_end]}')
112+
print(f'{rank} local: {z.local_array}, expected: {Z_true[:, col_start:col_end]}')
113113
else:
114114
print(f"RANK {rank}: ADJOINT VERIFICATION PASSED")

0 commit comments

Comments
 (0)