Skip to content

Commit 2eb3225

Browse files
committed
testing.CrossValidation & co.: use forkserver process start method on OS X
1 parent 4021fb4 commit 2eb3225

File tree

1 file changed: +17 additions, -5 deletions

Orange/evaluation/testing.py

Lines changed: 17 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,4 @@
1+
import sys
12
import multiprocessing as mp
23
from itertools import product
34
from threading import Thread
@@ -314,6 +315,12 @@ def _mp_worker(train_data, test_data, test_i, fold_i, learner_i, learner, queue)
314315
return fold_i, learner_i, test_i, model, failed, predicted, probs
315316

316317

318+
def _mp_context():
319+
# Workaround for locks on Macintosh
320+
# https://pythonhosted.org/joblib/parallel.html#bad-interaction-of-multiprocessing-and-third-party-libraries
321+
return mp.get_context('forkserver' if sys.platform == 'darwin' else None)
322+
323+
317324
class CrossValidation(Results):
318325
"""
319326
K-fold cross validation.
@@ -381,7 +388,8 @@ def __init__(self, data, learners, k=10, random_state=0, store_data=False,
381388
# generators are concerned. I'm stumped.
382389
product(data_splits, enumerate(learners)))
383390

384-
with joblib.Parallel(n_jobs=n_jobs) as parallel:
391+
ctx = _mp_context()
392+
with joblib.Parallel(n_jobs=n_jobs, backend=ctx) as parallel:
385393
tasks = (joblib.delayed(_mp_worker)(*tup) for tup in args)
386394
thread = Thread(target=lambda: results.append(parallel(tasks)))
387395
thread.start()
@@ -446,7 +454,8 @@ def data_splits():
446454
for (fold_i, test_i, train, test) in data_splits()
447455
for (learner_i, learner) in enumerate(learners))
448456

449-
with joblib.Parallel(n_jobs=n_jobs) as parallel:
457+
ctx = _mp_context()
458+
with joblib.Parallel(n_jobs=n_jobs, backend=ctx) as parallel:
450459
tasks = (joblib.delayed(_mp_worker)(*tup) for tup in args)
451460
thread = Thread(target=lambda: results.append(parallel(tasks)))
452461
thread.start()
@@ -498,7 +507,8 @@ def data_splits():
498507
for (fold_i, test_i, train, test) in data_splits()
499508
for (learner_i, learner) in enumerate(learners))
500509

501-
with joblib.Parallel(n_jobs=n_jobs) as parallel:
510+
ctx = _mp_context()
511+
with joblib.Parallel(n_jobs=n_jobs, backend=ctx) as parallel:
502512
tasks = (joblib.delayed(_mp_worker)(*tup) for tup in args)
503513
thread = Thread(target=lambda: results.append(parallel(tasks)))
504514
thread.start()
@@ -565,7 +575,8 @@ def data_splits():
565575
for (fold_i, test_i, train, test), (learner_i, learner) in
566576
product(data_splits(), enumerate(learners)))
567577

568-
with joblib.Parallel(n_jobs=n_jobs) as parallel:
578+
ctx = _mp_context()
579+
with joblib.Parallel(n_jobs=n_jobs, backend=ctx) as parallel:
569580
tasks = (joblib.delayed(_mp_worker)(*tup) for tup in args)
570581
thread = Thread(target=lambda: results.append(parallel(tasks)))
571582
thread.start()
@@ -634,7 +645,8 @@ def data_splits():
634645
for (fold_i, test_i, train, test) in data_splits()
635646
for (learner_i, learner) in enumerate(learners))
636647

637-
with joblib.Parallel(n_jobs=n_jobs) as parallel:
648+
ctx = _mp_context()
649+
with joblib.Parallel(n_jobs=n_jobs, backend=ctx) as parallel:
638650
tasks = (joblib.delayed(_mp_worker)(*tup) for tup in args)
639651
thread = Thread(target=lambda: results.append(parallel(tasks)))
640652
thread.start()

0 commit comments

Comments
 (0)