commit e184357 (1 parent: 8960b96)
src/model.py
@@ -320,7 +320,7 @@ def _validate_device_config(self):
                 f"Detected KIND_GPU model instance, explicitly setting GPU device={triton_device_id} for {triton_instance}"
             )
             # vLLM doesn't currently (v0.4.2) expose device selection in the APIs
-            torch.cuda.set_device(triton_device_id)
+            os.environ["CUDA_VISIBLE_DEVICES"] = str(triton_device_id)
 
     def _setup_lora(self):
         self.enable_lora = False
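The change trades torch.cuda.set_device(), which only sets the calling process's default CUDA device, for masking device visibility: since vLLM v0.4.2 does not expose device selection in its engine API, setting CUDA_VISIBLE_DEVICES before the engine is constructed is a way to confine a KIND_GPU instance to its assigned GPU. Below is a minimal, self-contained sketch of that idea, not the backend's actual code; the helper name pin_gpu_for_instance and its parameters are hypothetical, and the argument names are only assumed to mirror what the Triton Python backend passes to initialize(args).

import logging
import os

logger = logging.getLogger(__name__)


def pin_gpu_for_instance(model_instance_kind: str, device_id: int, instance_name: str) -> None:
    # Hypothetical helper: restrict CUDA visibility so the vLLM engine created
    # later in this process only sees the GPU assigned to this Triton model
    # instance. Argument names are assumed to mirror the values the Triton
    # Python backend hands to initialize(args).
    if model_instance_kind != "GPU":
        return
    logger.info(
        "Detected KIND_GPU model instance, explicitly setting GPU device=%d for %s",
        device_id,
        instance_name,
    )
    # vLLM (v0.4.2) does not take a device argument, and torch.cuda.set_device()
    # only changes the caller's default device; masking visibility makes the
    # assigned physical GPU appear as cuda:0 to every CUDA consumer in the process.
    os.environ["CUDA_VISIBLE_DEVICES"] = str(device_id)

The important ordering constraint is that the environment variable must be set before any CUDA context is initialized in the process (i.e. before the vLLM engine or any torch.cuda call), otherwise the mask has no effect on devices that are already enumerated.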