File tree Expand file tree Collapse file tree 3 files changed +8
-7
lines changed
cpp/tensorrt_llm/executor/cache_transmission/nixl_utils Expand file tree Collapse file tree 3 files changed +8
-7
lines changed Original file line number Diff line number Diff line change @@ -348,11 +348,11 @@ NixlTransferAgent::NixlTransferAgent(BaseAgentConfig const& config)
348 348
349 349     std::string nixlBackend = common::getEnvNixlBackend();
350350 // List of supported backends - extend this list as new backends are added
351 -     static const std::set<std::string> kSUPPORTED_BACKENDS = {"UCX"};
351 +     static const std::set<std::string> kSUPPORTED_BACKENDS = {"UCX", "LIBFABRIC"};
352352
353 353     if (kSUPPORTED_BACKENDS.find(nixlBackend) == kSUPPORTED_BACKENDS.end())
354354 {
355 -         TLLM_LOG_ERROR("Unsupported NIXL backend: %s, fallback to UCX", nixlBackend.c_str());
355 +         TLLM_LOG_WARNING("Unsupported NIXL backend: %s, fallback to UCX", nixlBackend.c_str());
356 356         nixlBackend = "UCX";
357357 }
358358
Original file line number Diff line number Diff line change @@ -38,7 +38,8 @@ meson setup builddir \
38 38     -Dcudapath_lib="$CUDA_PATH/lib64" \
39 39     -Dcudapath_inc="$CUDA_PATH/include" \
40 40     -Dgds_path="$GDS_PATH" \
41 -     -Dinstall_headers=true
41 +     -Dinstall_headers=true \
42 +     --buildtype=release
4243
4344cd builddir && ninja install
4445cd ../..
Original file line number Diff line number Diff line change
13 13 # images are adopted from PostMerge pipelines, the abbreviated commit hash is used instead.
14 14 IMAGE_NAME=urm.nvidia.com/sw-tensorrt-docker/tensorrt-llm
1515
16 - LLM_DOCKER_IMAGE=urm.nvidia.com/sw-tensorrt-docker/tensorrt-llm:pytorch-25.10-py3-x86_64-ubuntu24.04-trt10.13.3.9-skip-tritondevel-202511281406-9077
17 - LLM_SBSA_DOCKER_IMAGE=urm.nvidia.com/sw-tensorrt-docker/tensorrt-llm:pytorch-25.10-py3-aarch64-ubuntu24.04-trt10.13.3.9-skip-tritondevel-202511281406-9077
18 - LLM_ROCKYLINUX8_PY310_DOCKER_IMAGE=urm.nvidia.com/sw-tensorrt-docker/tensorrt-llm:cuda-13.0.2-devel-rocky8-x86_64-rocky8-py310-trt10.13.3.9-skip-tritondevel-202511281406-9077
19 - LLM_ROCKYLINUX8_PY312_DOCKER_IMAGE=urm.nvidia.com/sw-tensorrt-docker/tensorrt-llm:cuda-13.0.2-devel-rocky8-x86_64-rocky8-py312-trt10.13.3.9-skip-tritondevel-202511281406-9077
16 + LLM_DOCKER_IMAGE=urm.nvidia.com/sw-tensorrt-docker/tensorrt-llm:pytorch-25.10-py3-x86_64-ubuntu24.04-trt10.13.3.9-skip-tritondevel-202512041415-9225
17 + LLM_SBSA_DOCKER_IMAGE=urm.nvidia.com/sw-tensorrt-docker/tensorrt-llm:pytorch-25.10-py3-aarch64-ubuntu24.04-trt10.13.3.9-skip-tritondevel-202512041415-9225
18 + LLM_ROCKYLINUX8_PY310_DOCKER_IMAGE=urm.nvidia.com/sw-tensorrt-docker/tensorrt-llm:cuda-13.0.2-devel-rocky8-x86_64-rocky8-py310-trt10.13.3.9-skip-tritondevel-202512041415-9225
19 + LLM_ROCKYLINUX8_PY312_DOCKER_IMAGE=urm.nvidia.com/sw-tensorrt-docker/tensorrt-llm:cuda-13.0.2-devel-rocky8-x86_64-rocky8-py312-trt10.13.3.9-skip-tritondevel-202512041415-9225
You can’t perform that action at this time.
0 commit comments