
Commit 3a256de

Remove deprecated max_workers (#3605)
Any workflows that still use max_workers will no longer simply get a warning message while continuing to work; they will now fail. Users will need to change any scripts to use max_workers_per_node. (But they've had six months of warnings to do the deed!)
1 parent 3f2bf18 commit 3a256de
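
For scripts that still pass the removed parameter, migration is a one-line rename. A minimal sketch of the new spelling, assuming a local test-style setup (the surrounding Config and provider choices are illustrative, not mandated by this commit):

    from parsl.config import Config
    from parsl.executors import HighThroughputExecutor
    from parsl.providers import LocalProvider

    # Before: HighThroughputExecutor(..., max_workers=1)   <- parameter removed
    # After:  use max_workers_per_node instead
    config = Config(
        executors=[
            HighThroughputExecutor(
                label="htex_local",
                max_workers_per_node=1,  # replaces the removed max_workers
                provider=LocalProvider(),
            )
        ]
    )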

File tree

6 files changed: +19, -46 lines


parsl/executors/high_throughput/executor.py

Lines changed: 1 addition & 17 deletions
@@ -199,9 +199,6 @@ class HighThroughputExecutor(BlockProviderExecutor, RepresentationMixin, UsageIn
         will check the available memory at startup and limit the number of workers such that
         the there's sufficient memory for each worker. Default: None
 
-    max_workers : int
-        Deprecated. Please use max_workers_per_node instead.
-
     max_workers_per_node : int
         Caps the number of workers launched per node. Default: None
 
@@ -239,7 +236,6 @@ def __init__(self,
                  worker_debug: bool = False,
                  cores_per_worker: float = 1.0,
                  mem_per_worker: Optional[float] = None,
-                 max_workers: Optional[Union[int, float]] = None,
                  max_workers_per_node: Optional[Union[int, float]] = None,
                  cpu_affinity: str = 'none',
                  available_accelerators: Union[int, Sequence[str]] = (),
@@ -272,9 +268,7 @@ def __init__(self,
         else:
             self.all_addresses = ','.join(get_all_addresses())
 
-        if max_workers:
-            self._warn_deprecated("max_workers", "max_workers_per_node")
-        self.max_workers_per_node = max_workers_per_node or max_workers or float("inf")
+        self.max_workers_per_node = max_workers_per_node or float("inf")
 
         mem_slots = self.max_workers_per_node
         cpu_slots = self.max_workers_per_node
@@ -335,16 +329,6 @@ def _warn_deprecated(self, old: str, new: str):
             stacklevel=2
         )
 
-    @property
-    def max_workers(self):
-        self._warn_deprecated("max_workers", "max_workers_per_node")
-        return self.max_workers_per_node
-
-    @max_workers.setter
-    def max_workers(self, val: Union[int, float]):
-        self._warn_deprecated("max_workers", "max_workers_per_node")
-        self.max_workers_per_node = val
-
     @property
     def logdir(self):
         return "{}/{}".format(self.run_dir, self.label)
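
With the deprecation shim, property, and setter gone, the fallback in __init__ reduces to a single line: if max_workers_per_node is not given, the cap becomes float("inf") and only the per-worker CPU and memory slot calculations limit how many workers start. A small sketch of that behaviour, assuming plain construction of the executor without starting it:

    from parsl.executors import HighThroughputExecutor

    htex = HighThroughputExecutor()                    # no explicit cap supplied
    assert htex.max_workers_per_node == float("inf")   # unlimited until cpu/mem slots apply

    capped = HighThroughputExecutor(max_workers_per_node=4)
    assert capped.max_workers_per_node == 4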

parsl/tests/site_tests/README.rst

Lines changed: 15 additions & 14 deletions
@@ -46,27 +46,28 @@ Adding a new site
 1. Specialized python builds for the system (for eg, Summit)
 2. Anaconda available via modules
 3. User's conda installation
-* Add a new block to `conda_setup.sh` that installs a fresh environment and writes out
-  the activation commands to `~/setup_parsl_test_env.sh`
-* Add a site config to `parsl/tests/configs/<SITE.py>` and add your local user options
-  to `parsl/tests/configs/local_user_opts.py`. For eg, `here's mine<https://gist.github.com/yadudoc/b71765284d2db0706c4f43605dd8b8d6>`_
-  Make sure that the site config uses the `fresh_config` pattern.
+* Add a new block to ``conda_setup.sh`` that installs a fresh environment and writes out
+  the activation commands to ``~/setup_parsl_test_env.sh``
+* Add a site config to ``parsl/tests/configs/<SITE.py>`` and add your local user options
+  to ``parsl/tests/configs/local_user_opts.py``. For example,
+  `here's mine<https://gist.github.com/yadudoc/b71765284d2db0706c4f43605dd8b8d6>`_
+  Make sure that the site config uses the ``fresh_config`` pattern.
   Please ensure that the site config uses:
-    * max_workers = 1
-    * init_blocks = 1
-    * min_blocks = 0
+    * ``max_workers_per_node = 1``
+    * ``init_blocks = 1``
+    * ``min_blocks = 0``
 
-* Add this site config to `parsl/tests/site_tests/site_config_selector.py`
-* Reinstall parsl, using `pip install .`
-* Test a single test: `python3 test_site.py -d` to confirm that the site works correctly.
-* Once tests are passing run the whole site_test with `make site_test`
+* Add this site config to ``parsl/tests/site_tests/site_config_selector.py``
+* Reinstall parsl, using ``pip install .``
+* Test a single test: ``python3 test_site.py -d`` to confirm that the site works correctly.
+* Once tests are passing run the whole site_test with ``make site_test``
 
 
 Shared filesystem option
 ------------------------
 
-There is a new env variable "SHARED_FS_OPTIONS" to pass markers to pytest to skip certain tests.
+There is a new env variable ``SHARED_FS_OPTIONS`` to pass markers to pytest to skip certain tests.
 
 When there's a shared-FS, the default NoOpStaging works. However, when there's no shared-FS some tests
 that uses File objects require a staging provider (eg. rsync). These tests can be turned off with
-`-k "not staging_required"`
+``-k "not staging_required"``
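
A hedged sketch of a site config following the updated README, using the ``fresh_config`` pattern with the three required settings; the provider, its options, and the file name are placeholders for a real site, not something this commit prescribes:

    # parsl/tests/configs/<SITE>.py  (hypothetical example)
    from parsl.config import Config
    from parsl.executors import HighThroughputExecutor
    from parsl.providers import SlurmProvider  # placeholder provider for an example site

    def fresh_config():
        return Config(
            executors=[
                HighThroughputExecutor(
                    label="site_htex",
                    max_workers_per_node=1,   # required by the site test README
                    provider=SlurmProvider(
                        init_blocks=1,        # required by the site test README
                        min_blocks=0,
                    ),
                )
            ]
        )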

parsl/tests/test_htex/test_htex.py

Lines changed: 0 additions & 12 deletions
@@ -126,18 +126,6 @@ def kill_interchange(*args, **kwargs):
     assert "HighThroughputExecutor has not started" in caplog.text
 
 
-@pytest.mark.local
-def test_max_workers_per_node():
-    with pytest.warns(DeprecationWarning) as record:
-        htex = HighThroughputExecutor(max_workers_per_node=1, max_workers=2)
-
-    warning_msg = "max_workers is deprecated"
-    assert any(warning_msg in str(warning.message) for warning in record)
-
-    # Ensure max_workers_per_node takes precedence
-    assert htex.max_workers_per_node == htex.max_workers == 1
-
-
 @pytest.mark.local
 @pytest.mark.parametrize("cmd", (None, "custom-launch-cmd"))
 def test_htex_worker_pool_launch_cmd(cmd: Optional[str]):
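
The deleted test exercised the old deprecation path (warning plus precedence rules). With the parameter gone from __init__, passing max_workers is now an ordinary TypeError, so a replacement check, if one were wanted, could be as simple as this sketch (not part of this commit):

    import pytest
    from parsl.executors import HighThroughputExecutor

    @pytest.mark.local
    def test_max_workers_removed():
        # the keyword no longer exists in the signature, so Python rejects it outright
        with pytest.raises(TypeError):
            HighThroughputExecutor(max_workers_per_node=1, max_workers=2)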

parsl/tests/test_mpi_apps/test_mpiex.py

Lines changed: 1 addition & 1 deletion
@@ -43,7 +43,7 @@ def test_init():
 
     new_kwargs = {'max_workers_per_block', 'mpi_launcher'}
     excluded_kwargs = {'available_accelerators', 'cores_per_worker', 'max_workers_per_node',
-                       'mem_per_worker', 'cpu_affinity', 'max_workers', 'manager_selector'}
+                       'mem_per_worker', 'cpu_affinity', 'manager_selector'}
 
 
     # Get the kwargs from both HTEx and MPIEx
     htex_kwargs = set(signature(HighThroughputExecutor.__init__).parameters)

parsl/tests/test_scaling/test_scale_down_htex_auto_scale.py

Lines changed: 1 addition & 1 deletion
@@ -23,7 +23,7 @@ def local_config():
                 poll_period=100,
                 label="htex_local",
                 address="127.0.0.1",
-                max_workers=1,
+                max_workers_per_node=1,
                 encrypted=True,
                 provider=LocalProvider(
                     channel=LocalChannel(),

parsl/tests/test_scaling/test_scale_down_htex_unregistered.py

Lines changed: 1 addition & 1 deletion
@@ -27,7 +27,7 @@ def local_config():
                 poll_period=100,
                 label="htex_local",
                 address="127.0.0.1",
-                max_workers=1,
+                max_workers_per_node=1,
                 encrypted=True,
                 launch_cmd="sleep inf",
                 provider=LocalProvider(
