Commit 8c60cb6

ENH: cuDF 25.2 compatibility (#853)
1 parent 8cfddb5 commit 8c60cb6

5 files changed
+58 -8 lines changed

.github/workflows/python.yaml

Lines changed: 1 addition & 1 deletion
@@ -290,7 +290,7 @@ jobs:
       if: ${{ matrix.module == 'gpu' }}
       run: |
         source activate ${{ env.CONDA_ENV }}
-        pip install --extra-index-url=https://pypi.nvidia.com cudf-cu12==24.10.*
+        pip install --extra-index-url=https://pypi.nvidia.com cudf-cu12==25.2.*
         pip install ucxx-cu12 cython numpy cloudpickle scikit-learn \
           pyyaml psutil tornado sqlalchemy defusedxml tqdm uvloop coverage \
           pytest pytest-cov pytest-timeout pytest-forked pytest-asyncio pytest-mock
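
The workflow change above only bumps the version pin pulled from the NVIDIA package index. As a quick local sanity check (illustrative only, not part of this commit), the resolved wheel can be compared against the 25.2 pin with the standard packaging helper:

# Sketch: confirm the installed cuDF wheel matches the 25.2 pin.
# Assumes `packaging` and `cudf` are importable in the environment.
from packaging.version import Version

import cudf

v = Version(cudf.__version__)
assert (v.major, v.minor) == (25, 2), f"unexpected cuDF version: {v}"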

python/xorbits/_mars/deploy/oscar/tests/test_local.py

Lines changed: 1 addition & 1 deletion
@@ -1144,7 +1144,7 @@ def test_naive_code_file():
         raise


-ucp = lazy_import("ucp")
+ucp = lazy_import("ucxx")
 _OSCAR_CONF_TEMPLATE = """
 "@inherits": '@default'
 oscar:
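
The test module only changes which package lazy_import probes for, since the UCX Python bindings installed in CI are now the ucxx-cu12 wheels. A simplified stand-in for that helper (the real lazy_import lives in xorbits._mars.utils and returns a richer placeholder) shows why renaming the target is sufficient:

# Simplified, hypothetical stand-in for lazy_import, shown only to illustrate
# the "ucp" -> "ucxx" rename; it is not the project's actual implementation.
import importlib
import importlib.util


def lazy_import_sketch(name: str):
    # Return the module if it is installed, otherwise None, without failing
    # at import time on machines that lack the GPU/UCX stack.
    if importlib.util.find_spec(name) is None:
        return None
    return importlib.import_module(name)


ucp = lazy_import_sketch("ucxx")  # None when the UCX bindings are absent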

python/xorbits/_mars/lib/nvutils.py

Lines changed: 35 additions & 1 deletion
@@ -685,7 +685,41 @@ def _running_process_matches(handle: _nvmlDevice_t) -> bool:
     out : bool
         Whether the device handle has a CUDA context on the running process.
     """
-    return any(os.getpid() == o.pid for o in get_compute_running_processes(handle))
+    current_pid = os.getpid()
+    try:
+        _nvml_check_error(_nvml_lib.nvmlInit_v2())
+        procs = get_compute_running_processes(handle)
+        if any(p.pid == current_pid for p in procs):
+            return True
+    except NVMLAPIError:
+        pass
+    finally:
+        try:
+            _nvml_lib.nvmlShutdown()
+        except Exception:
+            pass
+
+    try:
+        import cupy
+
+        cupy.cuda.runtime.deviceSynchronize()
+        return True
+    except ImportError:
+        pass
+    except Exception:
+        pass
+
+    try:
+        out = subprocess.check_output(
+            ["nvidia-smi", "--query-compute-apps=pid", "--format=csv,noheader"],
+            text=True,
+            stderr=subprocess.DEVNULL,
+        )
+        return str(current_pid) in out.splitlines()
+    except Exception:
+        pass
+
+    return False


 def get_cuda_context() -> CudaContext:
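
The rewritten check now tries three probes in order: the NVML process list, a CuPy device synchronize, and finally nvidia-smi output. A standalone sketch of the first probe using the public pynvml bindings (an assumption; the committed code uses the module's internal ctypes wrappers instead) looks like this:

# Illustrative only: query NVML for compute processes on one device and
# check whether the current PID is among them.
import os

import pynvml


def current_process_uses_gpu(device_index: int = 0) -> bool:
    pid = os.getpid()
    pynvml.nvmlInit()
    try:
        handle = pynvml.nvmlDeviceGetHandleByIndex(device_index)
        procs = pynvml.nvmlDeviceGetComputeRunningProcesses(handle)
        return any(p.pid == pid for p in procs)
    except pynvml.NVMLError:
        return False
    finally:
        pynvml.nvmlShutdown()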

python/xorbits/_mars/tensor/array_utils.py

Lines changed: 12 additions & 3 deletions
@@ -86,8 +86,12 @@ def _get(x):


 def move_to_device(x, device_id):
-    if hasattr(x, "device") and x.device.id == device_id:
-        return x
+    if hasattr(x, "device"):
+        if isinstance(x.device, str):
+            if x.device == str(device_id):
+                return x
+        elif x.device.id == device_id:
+            return x

     assert device_id >= 0

@@ -97,7 +101,12 @@ def move_to_device(x, device_id):
     # for dense array, we currently copy from gpu to memory and then copy back to destination device
     # to avoid kernel panic
     with cp.cuda.Device(device_id):
-        return cp.asarray(cp.asnumpy(x))  # remove `cp.asnumpy` call to do directly copy
+        if isinstance(x, np.ndarray):
+            return cp.asarray(x)
+        else:
+            return cp.asarray(
+                cp.asnumpy(x)
+            )  # remove `cp.asnumpy` call to do directly copy


 def convert_order(x, order):
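
move_to_device now accepts objects whose device attribute is a plain string as well as CuPy-style device objects with an id, and copies host NumPy arrays to the GPU directly instead of round-tripping through cp.asnumpy. The early-return branch can be exercised without a GPU using hypothetical stand-in objects (SimpleNamespace here is only a test double, not a real cuDF or CuPy type):

# Mirrors only the early-return branch of the updated move_to_device.
from types import SimpleNamespace


def already_on_device(x, device_id: int) -> bool:
    if hasattr(x, "device"):
        if isinstance(x.device, str):
            return x.device == str(device_id)
        return x.device.id == device_id
    return False


assert already_on_device(SimpleNamespace(device="0"), 0)
assert already_on_device(SimpleNamespace(device=SimpleNamespace(id=1)), 1)
assert not already_on_device(SimpleNamespace(device="1"), 0)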

python/xorbits/_mars/utils.py

Lines changed: 9 additions & 2 deletions
@@ -504,7 +504,11 @@ def calc_data_size(dt: Any, shape: Tuple[int] = None) -> int:
     if hasattr(dt, "shape") and len(dt.shape) == 0:
         return 0
     if hasattr(dt, "dtypes") and shape is not None:
-        size = shape[0] * sum(_get_dtype_itemsize(dtype) for dtype in dt.dtypes)
+        dtypes = dt.dtypes
+        try:
+            size = shape[0] * sum(_get_dtype_itemsize(dtype) for dtype in dtypes)
+        except TypeError:
+            size = shape[0] * _get_dtype_itemsize(dtypes)
     try:
         index_value_value = dt.index_value.value
         if hasattr(index_value_value, "dtype") and not isinstance(

@@ -552,7 +556,10 @@ def _is_fast_dtype(dtype):

     # handling possible MultiIndex
     if hasattr(index_obj, "dtypes"):
-        dtypes.extend(index_obj.dtypes)
+        try:
+            dtypes.extend(index_obj.dtypes)
+        except TypeError:
+            dtypes.append(index_obj.dtypes)
     else:
         dtypes.append(index_obj.dtype)

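
Both hunks guard against a dtypes attribute that comes back as a single dtype rather than an iterable of dtypes, falling back via TypeError. The pattern can be illustrated with plain NumPy dtypes (a sketch that runs without cuDF; the cuDF objects that trigger the scalar case are not reproduced here):

# Handle `dtypes` that may be either an iterable of dtypes or a single dtype.
import numpy as np


def total_itemsize(dtypes) -> int:
    try:
        return sum(np.dtype(dt).itemsize for dt in dtypes)
    except TypeError:
        return np.dtype(dtypes).itemsize


assert total_itemsize([np.dtype("int64"), np.dtype("float32")]) == 12
assert total_itemsize(np.dtype("int64")) == 8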