|
186 | 186 | " return g(item)" |
187 | 187 | ] |
188 | 188 | }, |
| 189 | + { |
| 190 | + "cell_type": "code", |
| 191 | + "execution_count": null, |
| 192 | + "metadata": {}, |
| 193 | + "outputs": [], |
| 194 | + "source": [ |
| 195 | + "#export\n", |
| 196 | + "def check_parallel_num(param_name, num_workers):\n", |
| 197 | + " if sys.platform == \"win32\" and IN_NOTEBOOK and num_workers > 0:\n", |
|     | 198 | + "        print(\"Due to IPython and Windows limitations, Python multiprocessing isn't available now.\")\n", |
|     | 199 | + "        print(f\"So `{param_name}` is set to 0 to avoid getting stuck\")\n", |
| 200 | + " num_workers = 0\n", |
| 201 | + " return num_workers" |
| 202 | + ] |
| 203 | + }, |
189 | 204 | { |
190 | 205 | "cell_type": "code", |
191 | 206 | "execution_count": null, |
|
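A quick usage sketch of the new guard (assuming, per the `#export` flag, that it lands in `fastcore.parallel`): the requested worker count passes through unchanged everywhere except a Windows notebook, where it warns and returns 0 to force serial execution.

```python
import sys
from fastcore.parallel import check_parallel_num  # assumes the export above

n = check_parallel_num('n_workers', 4)
# On win32 inside a notebook this prints the warning and returns 0;
# on every other platform the requested count comes back unchanged.
if sys.platform != "win32":
    assert n == 4
```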
221 | 236 | "text/markdown": [ |
222 | 237 | "<h4 id=\"ThreadPoolExecutor\" class=\"doc_header\"><code>class</code> <code>ThreadPoolExecutor</code><a href=\"\" class=\"source_link\" style=\"float:right\">[source]</a></h4>\n", |
223 | 238 | "\n", |
224 | | - "> <code>ThreadPoolExecutor</code>(**`max_workers`**=*`16`*, **`on_exc`**=*`print`*, **`pause`**=*`0`*, **\\*\\*`kwargs`**) :: [`ThreadPoolExecutor`](/parallel.html#ThreadPoolExecutor)\n", |
| 239 | + "> <code>ThreadPoolExecutor</code>(**`max_workers`**=*`20`*, **`on_exc`**=*`print`*, **`pause`**=*`0`*, **\\*\\*`kwargs`**) :: [`ThreadPoolExecutor`](/parallel.html#ThreadPoolExecutor)\n", |
225 | 240 | "\n", |
226 | 241 | "Same as Python's ThreadPoolExecutor, except can pass `max_workers==0` for serial execution" |
227 | 242 | ], |
|
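The one documented difference from the stdlib class is that `max_workers==0` means serial execution in the calling thread, which keeps exceptions and breakpoints easy to work with. A minimal sketch, assuming fastcore is importable (the `ProcessPoolExecutor` subclass below offers the same escape hatch):

```python
from fastcore.parallel import ThreadPoolExecutor

# max_workers=0 runs every call serially in the current thread
with ThreadPoolExecutor(max_workers=0) as ex:
    results = list(ex.map(lambda x: x * 2, range(5)))
assert results == [0, 2, 4, 6, 8]
```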
248 | 263 | " \"Same as Python's ProcessPoolExecutor, except can pass `max_workers==0` for serial execution\"\n", |
249 | 264 | " def __init__(self, max_workers=defaults.cpus, on_exc=print, pause=0, **kwargs):\n", |
250 | 265 | " if max_workers is None: max_workers=defaults.cpus\n", |
| 266 | + " max_workers = check_parallel_num('max_workers', max_workers)\n", |
251 | 267 | " store_attr()\n", |
252 | 268 | " self.not_parallel = max_workers==0\n", |
253 | 269 | " if self.not_parallel: max_workers=1\n", |
|
272 | 288 | "text/markdown": [ |
273 | 289 | "<h4 id=\"ProcessPoolExecutor\" class=\"doc_header\"><code>class</code> <code>ProcessPoolExecutor</code><a href=\"\" class=\"source_link\" style=\"float:right\">[source]</a></h4>\n", |
274 | 290 | "\n", |
275 | | - "> <code>ProcessPoolExecutor</code>(**`max_workers`**=*`16`*, **`on_exc`**=*`print`*, **`pause`**=*`0`*, **\\*\\*`kwargs`**) :: [`ProcessPoolExecutor`](/parallel.html#ProcessPoolExecutor)\n", |
| 291 | + "> <code>ProcessPoolExecutor</code>(**`max_workers`**=*`20`*, **`on_exc`**=*`print`*, **`pause`**=*`0`*, **\\*\\*`kwargs`**) :: [`ProcessPoolExecutor`](/parallel.html#ProcessPoolExecutor)\n", |
276 | 292 | "\n", |
277 | 293 | "Same as Python's ProcessPoolExecutor, except can pass `max_workers==0` for serial execution" |
278 | 294 | ], |
|
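With this change the process-backed constructor also routes through `check_parallel_num`, so a Windows notebook degrades to serial execution instead of hanging. A sketch for script use, with a hypothetical `square` helper: under the spawn start method, worker functions must be picklable, i.e. defined at module top level, and the call guarded by `__main__`.

```python
from fastcore.parallel import ProcessPoolExecutor

def square(x): return x * x  # top level, so spawned workers can import it

if __name__ == '__main__':
    with ProcessPoolExecutor(max_workers=2) as ex:
        assert list(ex.map(square, range(4))) == [0, 1, 4, 9]
```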
305 | 321 | "metadata": {}, |
306 | 322 | "outputs": [], |
307 | 323 | "source": [ |
308 | | - "#export \n", |
| 324 | + "#export\n", |
309 | 325 | "def parallel(f, items, *args, n_workers=defaults.cpus, total=None, progress=None, pause=0,\n", |
310 | 326 | " threadpool=False, timeout=None, chunksize=1, **kwargs):\n", |
311 | 327 | "    \"Applies `f` in parallel to `items`, using `n_workers`\"\n", |
|
329 | 345 | " return x+a\n", |
330 | 346 | "\n", |
331 | 347 | "inp,exp = range(50),range(1,51)\n", |
332 | | - "if sys.platform != \"win32\":\n", |
333 | | - " test_eq(parallel(add_one, inp, n_workers=2, progress=False), exp)\n", |
334 | | - " test_eq(parallel(add_one, inp, threadpool=True, n_workers=2, progress=False), exp)\n", |
335 | | - " test_eq(parallel(add_one, inp, n_workers=1, a=2), range(2,52))\n", |
| 348 | + "\n", |
| 349 | + "test_eq(parallel(add_one, inp, n_workers=2, progress=False), exp)\n", |
| 350 | + "test_eq(parallel(add_one, inp, threadpool=True, n_workers=2, progress=False), exp)\n", |
| 351 | + "test_eq(parallel(add_one, inp, n_workers=1, a=2), range(2,52))\n", |
336 | 352 | "test_eq(parallel(add_one, inp, n_workers=0), exp)\n", |
337 | 353 | "test_eq(parallel(add_one, inp, n_workers=0, a=2), range(2,52))" |
338 | 354 | ] |
|
362 | 378 | "name": "stdout", |
363 | 379 | "output_type": "stream", |
364 | 380 | "text": [ |
365 | | - "0 2021-01-22 21:17:38.942321\n", |
366 | | - "1 2021-01-22 21:17:39.192929\n", |
367 | | - "2 2021-01-22 21:17:39.444098\n", |
368 | | - "3 2021-01-22 21:17:39.695087\n", |
369 | | - "4 2021-01-22 21:17:39.946463\n" |
| 381 | + "0 2021-02-03 09:51:30.561681\n", |
| 382 | + "1 2021-02-03 09:51:30.812066\n", |
| 383 | + "2 2021-02-03 09:51:31.063662\n", |
| 384 | + "3 2021-02-03 09:51:31.313478\n", |
| 385 | + "4 2021-02-03 09:51:31.564776\n" |
370 | 386 | ] |
371 | 387 | } |
372 | 388 | ], |
|
375 | 391 | " time.sleep(random.random()/1000)\n", |
376 | 392 | " print(i, datetime.now())\n", |
377 | 393 | "\n", |
378 | | - "test_n_workers = 0 if sys.platform == \"win32\" else 2\n", |
379 | | - "parallel(print_time, range(5), n_workers=test_n_workers, pause=0.25);" |
| 394 | + "parallel(print_time, range(5), n_workers=2, pause=0.25);" |
380 | 395 | ] |
381 | 396 | }, |
382 | 397 | { |
|
423 | 438 | "#export\n", |
424 | 439 | "def parallel_gen(cls, items, n_workers=defaults.cpus, **kwargs):\n", |
425 | 440 | " \"Instantiate `cls` in `n_workers` procs & call each on a subset of `items` in parallel.\"\n", |
| 441 | + " n_workers = check_parallel_num('n_workers', n_workers)\n", |
426 | 442 | " if n_workers==0:\n", |
427 | 443 | " yield from enumerate(list(cls(**kwargs)(items)))\n", |
428 | 444 | " return\n", |
|
461 | 477 | "idxs,dat1 = zip(*res.sorted(itemgetter(0)))\n", |
462 | 478 | "test_eq(dat1, range(1,6))\n", |
463 | 479 | "\n", |
464 | | - "if sys.platform != \"win32\":\n", |
465 | | - " res = L(parallel_gen(_C, items, n_workers=3))\n", |
466 | | - " idxs,dat2 = zip(*res.sorted(itemgetter(0)))\n", |
467 | | - " test_eq(dat2, dat1)" |
| 480 | + "res = L(parallel_gen(_C, items, n_workers=3))\n", |
| 481 | + "idxs,dat2 = zip(*res.sorted(itemgetter(0)))\n", |
| 482 | + "test_eq(dat2, dat1)" |
468 | 483 | ] |
469 | 484 | }, |
470 | 485 | { |
|
500 | 515 | " yield k+self.a\n", |
501 | 516 | "\n", |
502 | 517 | "x = np.linspace(0,0.99,20)\n", |
503 | | - "test_n_workers = 0 if sys.platform == \"win32\" else 2\n", |
504 | | - "res = L(parallel_gen(TestSleepyBatchFunc, x, n_workers=test_n_workers))\n", |
| 518 | + "\n", |
| 519 | + "res = L(parallel_gen(TestSleepyBatchFunc, x, n_workers=2))\n", |
505 | 520 | "test_eq(res.sorted().itemgot(1), x+1)" |
506 | 521 | ] |
507 | 522 | }, |
|
546 | 561 | "execution_count": null, |
547 | 562 | "metadata": {}, |
548 | 563 | "outputs": [], |
549 | | - "source": [] |
| 564 | + "source": [ |
| 565 | + "from subprocess import Popen, PIPE\n", |
|     | 566 | + "# test that num_workers > 0 works in scripts when the Python process start method is spawn\n", |
|     | 567 | + "process = Popen([\"python\", \"parallel_test.py\"], stdout=PIPE)\n", |
|     | 568 | + "process.communicate(timeout=5)\n", |
| 569 | + "exit_code = process.wait()\n", |
| 570 | + "test_eq(exit_code, 0)" |
| 571 | + ] |
550 | 572 | } |
551 | 573 | ], |
552 | 574 | "metadata": { |
|
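The final cell shells out to a `parallel_test.py` script that is not part of this diff. A hypothetical sketch of what such a script might contain for the test to exit 0: the worker function sits at module top level (so spawn can re-import it) and the `parallel` call is guarded by `__main__`.

```python
# parallel_test.py -- hypothetical sketch; the real script is not shown in this diff
from fastcore.parallel import parallel

def add_one(x):
    return x + 1  # top level, hence picklable under the spawn start method

if __name__ == '__main__':
    # n_workers > 0 exercises real multiprocessing; the assert keeps the exit code honest
    res = parallel(add_one, range(10), n_workers=2, progress=False)
    assert list(res) == list(range(1, 11))
```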