how to get rapids to resolve? #1812
-
q1 - how to fix the channel priority issue? I somehow get an issue with channel priority:
my pixi file looks like: [dependencies]
python = "3.11"
pip = ">=24.2"
[environments]
research-cuda = {features = ["research-cuda", "research-basics"], solve-group = "default"}
[feature.research-basics.dependencies]
pytorch = {version = ">=2.4.0,<2.5", channel = "pytorch"}
torchaudio = {version = ">=2.4.0,<2.5", channel = "pytorch"}
torchvision = {version = ">=0.19.0,<0.20", channel = "pytorch"}
[feature.research-cuda]
channels = [
{channel = "rapidsai", priority = 1},
{channel = "nvidia", priority = 2},
{channel = "pytorch", priority = 3},
{channel = "conda-forge", priority = 4},
]
[feature.research-cuda.target.linux-64.dependencies]
pytorch-cuda = "~=12.4"
cuda-version = {version = ">=12.0,<=12.5", channel = "rapidsai"}
rapids = {version = "~=24.08", channel = "rapidsai"}
[feature.research-cuda.system-requirements]
cuda = "12"
[project]
name = "foo"
version = "1.0.0"
authors = [
"bar"
]
platforms = ["linux-64", "osx-arm64"]
channels = ["conda-forge", "pytorch"]
how can I fix this? Various attempts to modify channel priority have failed so far for me. q2 - how to refine the pytorch import? According to nvidia: |
Beta Was this translation helpful? Give feedback.
Replies: 4 comments 3 replies
-
Replacing cuda-version leads to |
Beta Was this translation helpful? Give feedback.
-
Pinning dask-cuda = {version = "~=24.08", channel = "rapidsai"} leads to the same issue for rmm. Something with the channel priority must be wrong. And, assuming higher is better, changing priority like channels = [ leads to the same error for cupy - but now it is trying to pull from conda-forge |
Beta Was this translation helpful? Give feedback.
-
Something like this
seems to resolve but:
|
Beta Was this translation helpful? Give feedback.
-
Hey @geoHeil, I've given the resolve a try; I've come up with this pixi.toml as a result. This solves for me locally, and on first glance it looks to take the correct packages from the right channels. [dependencies]
python = "3.11"
pip = ">=24.2"
[environments]
research-cuda = {features = ["research-cuda", "research-basics"], solve-group = "default"}
[feature.research-basics.dependencies]
pytorch = {version = ">=2.4.0,<2.5", channel = "pytorch"}
torchaudio = {version = ">=2.4.0,<2.5", channel = "pytorch"}
torchvision = {version = ">=0.19.0,<0.20", channel = "pytorch"}
[feature.research-cuda]
# Take what you can from rapidsai, but if you can't find it there, try conda-forge, and then pytorch
# Nvidia has moved all packages for cuda 12 to conda-forge
channels = [
{channel = "rapidsai", priority = 3},
{channel = "conda-forge", priority = 2},
{channel = "pytorch", priority = 1},
]
[feature.research-cuda.target.linux-64.dependencies]
pytorch-cuda = "~=12.4"
rapids = "~=24.08"
# These packages are available in rapidsai, but should be installed from conda-forge
cupy = { version = "*", channel = "conda-forge" }
ucx = { version = "*", channel = "conda-forge" }
datashader = { version = "*", channel = "conda-forge" }
[feature.research-cuda.system-requirements]
cuda = "12"
[project]
name = "foo"
platforms = ["linux-64", "osx-arm64"]
channels = ["conda-forge"] I hope this is what you need. Tips to conquer the multi-channel problem.
Reasons this was hard.
|
Beta Was this translation helpful? Give feedback.
Hey @geoHeil,
I've given the resolve a try; I've come up with this pixi.toml as a result. This solves for me locally, and on first glance it looks to take the correct packages from the right channels.