3 files changed, +12 -7 lines changed

@@ -230,8 +230,6 @@ def __init__(self,
 
     def _create_dask_client(self):
         self._is_dask_client_internally_created = True
-        if self._n_jobs is not None and self._n_jobs > 1:
-            dask.config.set({'distributed.worker.daemon': False})
         self._dask_client = dask.distributed.Client(
             dask.distributed.LocalCluster(
                 n_workers=self._n_jobs,
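
For context, a minimal standalone sketch of the client this helper now builds. Only n_workers=self._n_jobs is taken from the hunk, which is truncated after that argument; processes=False and threads_per_worker=1 are assumptions for illustration, matching the thread-based approach this change introduces.

import dask.distributed

# Hypothetical standalone version of the internal helper above. With a
# thread-based LocalCluster (processes=False) the workers are not daemon
# processes, so the 'distributed.worker.daemon' override is unnecessary.
def create_dask_client(n_jobs):
    return dask.distributed.Client(
        dask.distributed.LocalCluster(
            n_workers=n_jobs,
            processes=False,       # assumed: threads instead of processes
            threads_per_worker=1,  # assumed: one thread per worker
        )
    )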
@@ -36,7 +36,14 @@
 # Dask configuration
 # ==================
 #
-# Auto-sklearn requires dask workers to not run in the daemon setting
+# Auto-sklearn uses threads in Dask to launch memory-constrained jobs.
+# The number of threads can be provided directly via the n_jobs argument
+# when creating the AutoSklearnClassifier. Additionally, the user can
+# provide a dask_client argument whose workers run with processes=True.
+# When processes=True is used, the setting below is required so that the
+# workers are allowed to spawn internally generated processes.
+# Optionally, you can provide a dask client with processes=False and
+# remove the following line.
 dask.config.set({'distributed.worker.daemon': False})
 
 
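
A sketch of the two options described in the comment above. AutoSklearnClassifier, n_jobs, and dask_client are real auto-sklearn names taken from the comment; the cluster sizes are illustrative.

import dask
import dask.distributed

# Option 1: process-based workers. The daemon setting must be disabled so
# that auto-sklearn can spawn its own subprocesses inside the workers.
dask.config.set({'distributed.worker.daemon': False})
client = dask.distributed.Client(
    dask.distributed.LocalCluster(n_workers=2, processes=True)
)

# Option 2: thread-based workers. No daemon override is needed, and the
# dask.config.set line above can be removed.
# client = dask.distributed.Client(
#     dask.distributed.LocalCluster(n_workers=2, processes=False)
# )

# Either client can then be passed to auto-sklearn:
# automl = autosklearn.classification.AutoSklearnClassifier(
#     n_jobs=2, dask_client=client,
# )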
@@ -76,6 +83,9 @@ async def do_work():
 # one can also start a dask scheduler from the command line), see the
 # `dask cli docs <https://docs.dask.org/en/latest/setup/cli.html>`_ for
 # further information.
+# Please note that DASK_DISTRIBUTED__WORKER__DAEMON=False is required in
+# this case, as dask-worker creates a new process. That is, it is equivalent
+# to the setting described above for dask.distributed.Client with processes=True.
 #
 # Again, we need to make sure that we do not start the workers in a daemon
 # mode.
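
The environment variable follows dask's standard mapping from configuration keys to environment variables (DASK_ prefix, double underscores for nesting). A hedged sketch of launching such a worker from Python; the scheduler address and thread count are placeholders.

import os
import subprocess

# Equivalent to dask.config.set({'distributed.worker.daemon': False}),
# but applied to a worker started as a separate CLI process.
env = dict(os.environ, DASK_DISTRIBUTED__WORKER__DAEMON="False")
subprocess.Popen(
    ["dask-worker", "tcp://127.0.0.1:8786", "--nthreads", "1"],
    env=env,
)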
@@ -3,7 +3,6 @@
 import time
 import unittest.mock
 
-import dask
 from dask.distributed import Client, get_client
 import psutil
 import pytest
@@ -124,9 +123,8 @@ def dask_client(request):
 
     Workers are in subprocesses to not create deadlocks with the pynisher and logging.
     """
-    dask.config.set({'distributed.worker.daemon': False})
 
-    client = Client(n_workers=2, threads_per_worker=1, processes=True)
+    client = Client(n_workers=2, threads_per_worker=1, processes=False)
     print("Started Dask client={}\n".format(client))
 
     def get_finalizer(address):
@@ -150,7 +148,6 @@ def dask_client_single_worker(request):
     Using this might cause deadlocks with the pynisher and the logging module. However,
     it is used very rarely to avoid this issue as much as possible.
     """
-    dask.config.set({'distributed.worker.daemon': False})
 
     client = Client(n_workers=1, threads_per_worker=1, processes=False)
     print("Started Dask client={}\n".format(client))
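
For readers reconstructing the test setup, a self-contained sketch of the fixture pattern these hunks modify. The finalizer body is an assumption, since the diff truncates after def get_finalizer(address); request.addfinalizer, get_client, and client.shutdown are real pytest and dask APIs.

import pytest
from dask.distributed import Client, get_client

@pytest.fixture
def dask_client(request):
    # Thread-based workers after this change (processes=False).
    client = Client(n_workers=2, threads_per_worker=1, processes=False)
    print("Started Dask client={}\n".format(client))

    def get_finalizer(address):
        def teardown():
            # Reconnect by scheduler address and shut everything down
            # once the test that requested the fixture has finished.
            client = get_client(address)
            client.shutdown()
            client.close()
        return teardown

    request.addfinalizer(get_finalizer(client.scheduler_info()['address']))
    return client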