
Commit 4cda7ce

cleanup: no more lock around GPU workloads (#890)
1 parent 74b9345 commit 4cda7ce

File tree: 1 file changed (+1, -17 lines)


python/cocoindex/op.py

Lines changed: 1 addition & 17 deletions
@@ -11,10 +11,6 @@
     Awaitable,
     Callable,
     Protocol,
-    ParamSpec,
-    TypeVar,
-    Type,
-    cast,
     dataclass_transform,
     Annotated,
     get_args,
@@ -125,9 +121,6 @@ def __call__(
         return (result_type, executor)


-_gpu_dispatch_lock = asyncio.Lock()
-
-
 _COCOINDEX_ATTR_PREFIX = "cocoindex.io/"


@@ -348,16 +341,7 @@ async def __call__(self, *args: Any, **kwargs: Any) -> Any:
                 decoded_kwargs[kwarg_name] = kwarg_info.decoder(arg)

         assert self._acall is not None
-        if op_args.gpu:
-            # For GPU executions, data-level parallelism is applied, so we don't want to
-            # execute different tasks in parallel.
-            # Besides, multiprocessing is more appropriate for pytorch.
-            # For now, we use a lock to ensure only one task is executed at a time.
-            # TODO: Implement multi-processing dispatching.
-            async with _gpu_dispatch_lock:
-                output = await self._acall(*decoded_args, **decoded_kwargs)
-        else:
-            output = await self._acall(*decoded_args, **decoded_kwargs)
+        output = await self._acall(*decoded_args, **decoded_kwargs)
         return self._result_encoder(output)

     def enable_cache(self) -> bool:
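
Context for the removed code: before this commit, an executor flagged with op_args.gpu acquired a module-level asyncio.Lock before awaiting self._acall, so GPU-flagged tasks ran strictly one at a time; after the commit the call is awaited directly. The snippet below is a minimal, self-contained sketch of that before/after pattern. The names (run_with_lock, run_direct, fake_gpu_op) are hypothetical and not part of the cocoindex API; it only illustrates how a module-level lock serializes otherwise-concurrent awaits.

# Sketch only: hypothetical helper names, not the cocoindex code itself.
import asyncio

_gpu_dispatch_lock = asyncio.Lock()  # mirrors the removed module-level lock


async def run_with_lock(fn, *args):
    # Old pattern: every GPU-flagged call waits for the lock, so tasks
    # scheduled concurrently still execute one after another.
    async with _gpu_dispatch_lock:
        return await fn(*args)


async def run_direct(fn, *args):
    # New pattern: the call is awaited directly, with no serialization here.
    return await fn(*args)


async def fake_gpu_op(x: int) -> int:
    await asyncio.sleep(0.05)  # stand-in for GPU-bound work
    return x * 2


async def main() -> None:
    loop = asyncio.get_running_loop()

    start = loop.time()
    locked = await asyncio.gather(*(run_with_lock(fake_gpu_op, i) for i in range(4)))
    print("locked:", locked, f"{loop.time() - start:.2f}s")  # ~0.20s: sequential

    start = loop.time()
    direct = await asyncio.gather(*(run_direct(fake_gpu_op, i) for i in range(4)))
    print("direct:", direct, f"{loop.time() - start:.2f}s")  # ~0.05s: overlapping


asyncio.run(main())

Running the sketch shows the behavioral difference the diff implies: with the lock the gathered calls complete sequentially, without it they overlap. Whether and how concurrency is limited after this change is left to the executor itself; the commit message describes the removal simply as cleanup.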

0 commit comments
