Skip to content

Commit 1a1953d

Browse files
Enforce ruff rules (RUF) (#9153)
1 parent d55a2fd commit 1a1953d

File tree

16 files changed

+37
-23
lines changed

16 files changed

+37
-23
lines changed

distributed/cfexecutor.py

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -125,8 +125,7 @@ def map(self, fn, *iterables, **kwargs):
125125
if timeout is not None:
126126
timeout = parse_timedelta(timeout)
127127
end_time = timeout + time()
128-
if "chunksize" in kwargs:
129-
del kwargs["chunksize"]
128+
kwargs.pop("chunksize", None)
130129
if kwargs:
131130
raise TypeError("unexpected arguments to map(): %s" % sorted(kwargs))
132131

distributed/client.py

Lines changed: 3 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -2297,8 +2297,8 @@ def map(
22972297
keys = [list(element) for element in partition_all(batch_size, key)]
22982298
else:
22992299
keys = [key for _ in range(len(batches))]
2300-
return sum(
2301-
(
2300+
return list(
2301+
flatten(
23022302
self.map(
23032303
func,
23042304
*batch,
@@ -2315,8 +2315,7 @@ def map(
23152315
**kwargs,
23162316
)
23172317
for key, batch in zip(keys, batches)
2318-
),
2319-
[],
2318+
)
23202319
)
23212320

23222321
key = key or funcname(func)

distributed/core.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -520,7 +520,7 @@ async def start(self):
520520
timeout = getattr(self, "death_timeout", None)
521521

522522
async def _close_on_failure(exc: Exception) -> None:
523-
await self.close(reason=f"failure-to-start-{str(type(exc))}")
523+
await self.close(reason=f"failure-to-start-{type(exc)}")
524524
self.status = Status.failed
525525
self.__startup_exc = exc
526526

distributed/deploy/local.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -211,7 +211,7 @@ def __init__(
211211
n_workers = max(1, CPU_COUNT // threads_per_worker) if processes else 1
212212
if n_workers and threads_per_worker is None:
213213
# Overcommit threads per worker, rather than undercommit
214-
threads_per_worker = max(1, int(math.ceil(CPU_COUNT / n_workers)))
214+
threads_per_worker = max(1, math.ceil(CPU_COUNT / n_workers))
215215
if n_workers and "memory_limit" not in worker_kwargs:
216216
worker_kwargs["memory_limit"] = parse_memory_limit(
217217
"auto", 1, n_workers, logger=logger

distributed/deploy/spec.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -514,10 +514,10 @@ def _memory_per_worker(self) -> int:
514514

515515
def scale(self, n=0, memory=None, cores=None):
516516
if memory is not None:
517-
n = max(n, int(math.ceil(parse_bytes(memory) / self._memory_per_worker())))
517+
n = max(n, math.ceil(parse_bytes(memory) / self._memory_per_worker()))
518518

519519
if cores is not None:
520-
n = max(n, int(math.ceil(cores / self._threads_per_worker())))
520+
n = max(n, math.ceil(cores / self._threads_per_worker()))
521521

522522
if len(self.worker_spec) > n:
523523
not_yet_launched = set(self.worker_spec) - {

distributed/deploy/subprocess.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -241,7 +241,7 @@ def SubprocessCluster(
241241
n_workers = max(1, CPU_COUNT // threads_per_worker)
242242
if n_workers and threads_per_worker is None:
243243
# Overcommit threads per worker, rather than undercommit
244-
threads_per_worker = max(1, int(math.ceil(CPU_COUNT / n_workers)))
244+
threads_per_worker = max(1, math.ceil(CPU_COUNT / n_workers))
245245
if n_workers and "memory_limit" not in worker_kwargs:
246246
worker_kwargs["memory_limit"] = parse_memory_limit(
247247
"auto", 1, n_workers, logger=logger

distributed/protocol/serialize.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -126,7 +126,7 @@ def import_allowed_module(name):
126126
return _cached_allowed_modules[name]
127127
else:
128128
raise RuntimeError(
129-
f"Importing {repr(name)} is not allowed, please add it to the list of "
129+
f"Importing {name!r} is not allowed, please add it to the list of "
130130
"allowed modules the scheduler can import via the "
131131
"distributed.scheduler.allowed-imports configuration setting."
132132
)

distributed/protocol/tests/test_serialize.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -284,8 +284,8 @@ def test_serialize_bytes(kwargs):
284284
1,
285285
"abc",
286286
b"ab" * int(40e6),
287-
int(2**26) * b"ab",
288-
(int(2**25) * b"ab", int(2**25) * b"ab"),
287+
(2**26) * b"ab",
288+
((2**25) * b"ab", (2**25) * b"ab"),
289289
]:
290290
b = serialize_bytes(x, **kwargs)
291291
assert isinstance(b, bytes)

distributed/shuffle/tests/test_buffer.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -15,7 +15,7 @@
1515

1616

1717
def gen_bytes(percentage: float, limit: int) -> bytes:
18-
num_bytes = int(math.floor(percentage * limit))
18+
num_bytes = math.floor(percentage * limit)
1919
return b"0" * num_bytes
2020

2121

distributed/shuffle/tests/test_comm_buffer.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -94,7 +94,7 @@ async def send(address, shards):
9494

9595

9696
def gen_bytes(percentage: float, memory_limit: int) -> bytes:
97-
num_bytes = int(math.floor(percentage * memory_limit))
97+
num_bytes = math.floor(percentage * memory_limit)
9898
return b"0" * num_bytes
9999

100100

0 commit comments

Comments (0)