Skip to content

Commit fca0271

Browse files
authored
Merge pull request #4 from gjbex/development
Development
2 parents c2204fa + fa7c42e commit fca0271

File tree

4 files changed

+134
-0
lines changed

4 files changed

+134
-0
lines changed
Binary file not shown.
Lines changed: 77 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,77 @@
1+
#!/usr/bin/env python
2+
3+
from argparse import ArgumentParser
4+
from collections import Counter
5+
import multiprocessing as mp
6+
from multiprocessing.managers import SharedMemoryManager
7+
import numpy as np
8+
import os
9+
import sys
10+
11+
12+
def init_z(nr_points):
    """Build the flattened grid of complex starting points for the Julia set.

    The grid covers [-1.8, 1.8] on the real axis and [-1.8j, 1.8j] on the
    imaginary axis; the imaginary axis is flipped so the first row of the
    image corresponds to the largest imaginary part (top of the picture).

    Parameters
    ----------
    nr_points : int
        number of points along each axis; the result has nr_points**2 entries

    Returns
    -------
    np.ndarray
        1-D complex array of length nr_points**2
    """
    reals = np.linspace(-1.8, 1.8, nr_points)
    imags = np.linspace(-1.8j, 1.8j, nr_points)
    re_grid, im_grid = np.meshgrid(reals, np.flip(imags))
    grid = re_grid + im_grid
    return grid.ravel()
17+
18+
19+
def compute_partial_julia(args):
    """Iterate the Julia map on one slice of the shared-memory arrays.

    Parameters
    ----------
    args : tuple
        (z_shmem, n_shmem, idx_begin, idx_end, max_iters, max_norm) where
        z_shmem holds the complex starting points, n_shmem the int32
        iteration counts, [idx_begin, idx_end) is the slice to process,
        max_iters caps the iterations, and max_norm is the divergence bound.

    Returns
    -------
    int
        the worker's process ID, so the caller can tally work distribution

    Note: iteration counts are written in place into n_shmem.
    """
    z_shmem, n_shmem, idx_begin, idx_end, max_iters, max_norm = args
    # fix: np.complex was removed in NumPy 1.24; complex128 is the dtype
    # it aliased, so the buffer layout is unchanged.
    z_size = np.dtype(np.complex128).itemsize
    z_array = np.ndarray((idx_end - idx_begin, ), dtype=np.complex128,
                         buffer=z_shmem.buf[z_size*idx_begin:z_size*idx_end])
    n_size = np.dtype(np.int32).itemsize
    n = np.ndarray((idx_end - idx_begin, ), dtype=np.int32,
                   buffer=n_shmem.buf[n_size*idx_begin:n_size*idx_end])
    for i, z in enumerate(z_array):
        # iterate z -> z**2 + c until |z| exceeds max_norm or the count
        # passes max_iters (note: <= lets the count reach max_iters + 1,
        # preserved from the original)
        while (n[i] <= max_iters and np.abs(z) <= max_norm):
            z = z**2 - 0.622772 + 0.42193j
            n[i] += 1
    return os.getpid()
32+
33+
34+
def compute_julia(nr_points=100, pool_size=2, work_size=15, verbose=False,
                  max_iters=255, max_norm=2.0):
    """Compute a Julia set image using a process pool over shared memory.

    Parameters
    ----------
    nr_points : int
        image is nr_points x nr_points
    pool_size : int
        number of worker processes
    work_size : int
        number of points per work item; the last chunk may be shorter
    verbose : bool
        if True, print work items and per-PID work counts to stderr
    max_iters : int
        iteration cap passed to the workers
    max_norm : float
        divergence bound passed to the workers

    Returns
    -------
    np.ndarray
        nr_points x nr_points int32 array of iteration counts
    """
    size = nr_points**2
    # fix: np.complex was removed in NumPy 1.24; complex128 is what it aliased
    z_size = np.dtype(np.complex128).itemsize
    n_size = np.dtype(np.int32).itemsize
    with SharedMemoryManager() as shmem_mgr:
        with mp.Pool(pool_size) as pool:
            z_shmem = shmem_mgr.SharedMemory(size=z_size*size)
            z_buf = np.ndarray((size, ), dtype=np.complex128,
                               buffer=z_shmem.buf)
            z_buf[:] = init_z(nr_points)
            n_shmem = shmem_mgr.SharedMemory(size=n_size*size)
            n_buf = np.ndarray((size, ), dtype=np.int32, buffer=n_shmem.buf)
            n_buf[:] = np.zeros((size, ), dtype=np.int32)
            # one work item per chunk of work_size points
            args = [(z_shmem, n_shmem, i*work_size,
                     min(z_buf.size, (i + 1)*work_size), max_iters, max_norm)
                    for i in range(int(np.ceil(z_buf.size/work_size)))]
            if verbose:
                print(args, file=sys.stderr)
            pid_counter = Counter()
            for pid in pool.imap_unordered(compute_partial_julia, args):
                pid_counter[pid] += 1
            if verbose:
                print(pid_counter, file=sys.stderr)
            # copy out before the manager tears the shared memory down
            return n_buf.copy().reshape(nr_points, nr_points)
58+
59+
60+
def main():
    """Parse command-line options, compute the Julia set, and write the
    iteration counts to standard output as a matrix of integers.

    Returns
    -------
    int
        exit status, 0 on success
    """
    # fix: description previously said 'compute pi', which did not match
    # what this script does
    arg_parser = ArgumentParser(description='compute a Julia set')
    arg_parser.add_argument('--pool_size', type=int, default=2,
                            help='pool size')
    arg_parser.add_argument('--work_size', type=int, default=10,
                            help='number of points per work item')
    arg_parser.add_argument('--nr_points', type=int, default=10,
                            help='size of the image n x n')
    arg_parser.add_argument('--verbose', action='store_true',
                            help='verbose output')
    options = arg_parser.parse_args()
    # fix: removed dead local `constructor = None` (never used)
    n = compute_julia(nr_points=options.nr_points, pool_size=options.pool_size,
                      work_size=options.work_size, verbose=options.verbose)
    np.savetxt(sys.stdout, n, fmt='%3d')
    return 0
74+
75+
if __name__ == '__main__':
    # exit with whatever status main() reports
    sys.exit(main())
21.3 KB
Binary file not shown.
Lines changed: 57 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,57 @@
1+
#!/usr/bin/env python
2+
3+
from argparse import ArgumentParser
4+
from multiprocessing import Pool
5+
from multiprocessing.managers import SharedMemoryManager
6+
import numpy as np
7+
import os
8+
import sys
9+
10+
def increment(args):
    """Scale one slice of a shared-memory array in place.

    NOTE(review): despite the name, this *multiplies* each element by incr
    (`*=`); kept as-is since callers rely on the name.

    Parameters
    ----------
    args : tuple
        (shmem, dtype, incr, i_min, i_max): the shared-memory block, its
        element dtype, the factor to multiply by, and the half-open index
        range [i_min, i_max) of the slice to update.

    Returns
    -------
    tuple
        (worker process ID, incr)
    """
    shmem, dtype, factor, lo, hi = args
    item_size = np.dtype(dtype).itemsize
    view = np.ndarray((hi - lo, ), dtype=dtype,
                      buffer=shmem.buf[item_size*lo:item_size*hi])
    view *= factor
    return os.getpid(), factor
18+
19+
20+
def compute(array_size, pool_size, chunk_size, verbose=False):
    """Fill a shared int32 array with 0..n-1, scale it chunkwise in a pool,
    and return a private copy.

    Chunk number i is multiplied by i + 1 by a worker process, so the result
    depends on how the array is partitioned into chunks.

    Parameters
    ----------
    array_size : int
        the array is array_size x array_size
    pool_size : int
        number of worker processes
    chunk_size : int
        flat elements per work item; the last chunk may be shorter
    verbose : bool
        if True, print each worker's (pid, factor) result

    Returns
    -------
    np.ndarray
        array_size x array_size int32 copy of the scaled data
    """
    dtype = np.int32
    item_size = np.dtype(dtype).itemsize
    n_elems = array_size**2
    with SharedMemoryManager() as shmem_manager, Pool(pool_size) as pool:
        shmem_data = shmem_manager.SharedMemory(size=item_size*n_elems)
        data = np.ndarray((array_size, array_size), dtype=dtype,
                          buffer=shmem_data.buf)
        # initialize element (i, j) to i*array_size + j
        data[:] = np.arange(n_elems, dtype=dtype).reshape(array_size,
                                                          array_size)
        nr_chunks = int(np.ceil(data.size/chunk_size))
        args = [(shmem_data, np.int32, chunk_nr + 1, chunk_nr*chunk_size,
                 min((chunk_nr + 1)*chunk_size, data.size))
                for chunk_nr in range(nr_chunks)]
        for result in pool.imap_unordered(increment, args):
            if verbose:
                print(result)
        # copy before the manager tears the shared memory down
        return data.copy()
38+
39+
if __name__ == '__main__':
    # command-line driver: build, scale, and print the shared array
    arg_parser = ArgumentParser(description='illustrating shared memory')
    arg_parser.add_argument('--pool_size', type=int, default=2,
                            help='pool size')
    arg_parser.add_argument('--array_size', type=int, default=10,
                            help='array size')
    arg_parser.add_argument('--chunk_size', type=int, default=10,
                            help='chunk size')
    # fix: help text typo 'desplay' -> 'display'
    arg_parser.add_argument('--sum_only', action='store_true',
                            help='only display sum of array elements')
    arg_parser.add_argument('--verbose', action='store_true',
                            help='verbose output')
    options = arg_parser.parse_args()
    data = compute(array_size=options.array_size, pool_size=options.pool_size,
                   chunk_size=options.chunk_size, verbose=options.verbose)
    if options.sum_only:
        print(data.sum())
    else:
        np.savetxt(sys.stdout, data, fmt='%5d')

0 commit comments

Comments
 (0)