diff --git a/.gitignore b/.gitignore index bafd2974..419005f0 100644 --- a/.gitignore +++ b/.gitignore @@ -140,3 +140,4 @@ dmypy.json # vscode .vscode/settings.json +.vscode/c_cpp_properties.json diff --git a/CMakeLists.txt b/CMakeLists.txt index 69c7c698..0aaa95af 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -241,6 +241,8 @@ set( src/pb_response_iterator.cc src/pb_cancel.cc src/pb_cancel.h + src/pb_bls_cancel.cc + src/pb_bls_cancel.h ) list(APPEND diff --git a/README.md b/README.md index b00dc0bf..dd5e877a 100644 --- a/README.md +++ b/README.md @@ -1409,14 +1409,52 @@ class TritonPythonModel: A complete example for sync and async BLS for decoupled models is included in the [Examples](#examples) section. +Note: Async BLS is not supported on Python 3.6 or lower due to the `async` +keyword and `asyncio.run` being introduced in Python 3.7. + Starting from the 22.04 release, the lifetime of the BLS output tensors have been improved such that if a tensor is no longer needed in your Python model it will be automatically deallocated. This can increase the number of BLS requests that you can execute in your model without running into the out of GPU or shared memory error. -Note: Async BLS is not supported on Python 3.6 or lower due to the `async` -keyword and `asyncio.run` being introduced in Python 3.7. +### Cancelling decoupled BLS requests +A decoupled BLS inference request may be cancelled by calling the `cancel()` +method on the response iterator returned from the method executing the BLS +inference request. For example, + +```python +import triton_python_backend_utils as pb_utils + +class TritonPythonModel: + ... + def execute(self, requests): + ... + bls_response_iterator = bls_request.exec(decoupled=True) + ... + bls_response_iterator.cancel() + ... +``` + +You may also call the `cancel()` method on the response iterator returned from +the `async_exec()` method of the inference request. 
For example, + +```python +import triton_python_backend_utils as pb_utils + +class TritonPythonModel: + ... + async def execute(self, requests): + ... + bls_response_iterator = await bls_request.async_exec(decoupled=True) + ... + bls_response_iterator.cancel() + ... +``` + +Note: Whether the decoupled model returns a cancellation error and stops executing +the request depends on the model's backend implementation. Please refer to the +documentation for more details [Handling in Backend](https://github.com/triton-inference-server/server/blob/main/docs/user_guide/request_cancellation.md#handling-in-backend) ## Model Loading API diff --git a/src/infer_payload.cc b/src/infer_payload.cc index 762201e8..6baad307 100644 --- a/src/infer_payload.cc +++ b/src/infer_payload.cc @@ -1,4 +1,4 @@ -// Copyright 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. +// Copyright 2023-2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions @@ -31,7 +31,8 @@ namespace triton { namespace backend { namespace python { InferPayload::InferPayload( const bool is_decoupled, std::function)> callback) - : is_decoupled_(is_decoupled), is_promise_set_(false), callback_(callback) + : is_decoupled_(is_decoupled), is_promise_set_(false), callback_(callback), + request_address_(reinterpret_cast(nullptr)) { promise_.reset(new std::promise>()); } @@ -91,4 +92,31 @@ InferPayload::ResponseAllocUserp() return response_alloc_userp_; } +void +InferPayload::SetRequestAddress(intptr_t request_address) +{ + std::unique_lock lock(request_address_mutex_); + request_address_ = request_address; +} + +void +InferPayload::SetRequestCancellationFunc( + const std::function& request_cancel_func) +{ + request_cancel_func_ = request_cancel_func; +} + +void +InferPayload::SafeCancelRequest() +{ + std::unique_lock lock(request_address_mutex_); + if (request_address_ == 
0L) { + return; + } + + if (request_cancel_func_) { + request_cancel_func_(request_address_); + } +} + }}} // namespace triton::backend::python diff --git a/src/infer_payload.h b/src/infer_payload.h index 662e8922..8e4aa7d3 100644 --- a/src/infer_payload.h +++ b/src/infer_payload.h @@ -1,4 +1,4 @@ -// Copyright 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. +// Copyright 2023-2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions @@ -62,6 +62,10 @@ class InferPayload : public std::enable_shared_from_this { void SetResponseAllocUserp( const ResponseAllocatorUserp& response_alloc_userp); std::shared_ptr ResponseAllocUserp(); + void SetRequestAddress(intptr_t request_address); + void SetRequestCancellationFunc( + const std::function& request_cancel_func); + void SafeCancelRequest(); private: std::unique_ptr>> promise_; @@ -70,6 +74,9 @@ class InferPayload : public std::enable_shared_from_this { bool is_promise_set_; std::function)> callback_; std::shared_ptr response_alloc_userp_; + std::mutex request_address_mutex_; + intptr_t request_address_; + std::function request_cancel_func_; }; }}} // namespace triton::backend::python diff --git a/src/infer_response.cc b/src/infer_response.cc index a6b6847d..382756d4 100644 --- a/src/infer_response.cc +++ b/src/infer_response.cc @@ -91,6 +91,7 @@ InferResponse::SaveToSharedMemory( response_shm_ptr->is_error_set = false; shm_handle_ = response_shm_.handle_; response_shm_ptr->is_last_response = is_last_response_; + response_shm_ptr->id = id_; // Only save the output tensors to shared memory when the inference response // doesn't have error. 
@@ -113,7 +114,6 @@ InferResponse::SaveToSharedMemory( tensor_handle_shm_ptr[j] = output_tensor->ShmHandle(); j++; } - response_shm_ptr->id = id_; parameters_shm_ = PbString::Create(shm_pool, parameters_); response_shm_ptr->parameters = parameters_shm_->ShmHandle(); diff --git a/src/ipc_message.h b/src/ipc_message.h index c3d1472e..c0fab3a3 100644 --- a/src/ipc_message.h +++ b/src/ipc_message.h @@ -1,4 +1,4 @@ -// Copyright 2021-2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved. +// Copyright 2021-2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions @@ -67,7 +67,8 @@ typedef enum PYTHONSTUB_commandtype_enum { PYTHONSTUB_LoadModelRequest, PYTHONSTUB_UnloadModelRequest, PYTHONSTUB_ModelReadinessRequest, - PYTHONSTUB_IsRequestCancelled + PYTHONSTUB_IsRequestCancelled, + PYTHONSTUB_CancelBLSInferRequest } PYTHONSTUB_CommandType; /// diff --git a/src/pb_bls_cancel.cc b/src/pb_bls_cancel.cc new file mode 100644 index 00000000..0df4492b --- /dev/null +++ b/src/pb_bls_cancel.cc @@ -0,0 +1,92 @@ +// Copyright 2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright +// notice, this list of conditions and the following disclaimer in the +// documentation and/or other materials provided with the distribution. +// * Neither the name of NVIDIA CORPORATION nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY +// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR +// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY +// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#include "pb_bls_cancel.h" + +#include "pb_stub.h" + +namespace triton { namespace backend { namespace python { + +void +PbBLSCancel::SaveToSharedMemory(std::unique_ptr& shm_pool) +{ + cancel_shm_ = shm_pool->Construct(); + new (&(cancel_shm_.data_->mu)) bi::interprocess_mutex; + new (&(cancel_shm_.data_->cv)) bi::interprocess_condition; + cancel_shm_.data_->waiting_on_stub = false; + cancel_shm_.data_->infer_payload_id = infer_playload_id_; + cancel_shm_.data_->is_cancelled = is_cancelled_; +} + +bi::managed_external_buffer::handle_t +PbBLSCancel::ShmHandle() +{ + return cancel_shm_.handle_; +} + +CancelBLSRequestMessage* +PbBLSCancel::ShmPayload() +{ + return cancel_shm_.data_.get(); +} + +void +PbBLSCancel::Cancel() +{ + // Release the GIL. Python objects are not accessed during the check. + py::gil_scoped_release gil_release; + + std::unique_lock lk(mu_); + // The cancelled flag can only move from false to true, not the other way, so + // it is checked on each query until cancelled and then implicitly cached. 
+ if (is_cancelled_) { + return; + } + if (!updating_) { + std::unique_ptr& stub = Stub::GetOrCreateInstance(); + if (!stub->StubToParentServiceActive()) { + LOG_ERROR << "Cannot communicate with parent service"; + return; + } + + stub->EnqueueCancelBLSRequest(this); + updating_ = true; + } + cv_.wait(lk, [this] { return !updating_; }); +} + +void +PbBLSCancel::ReportIsCancelled(bool is_cancelled) +{ + { + std::lock_guard lk(mu_); + is_cancelled_ = is_cancelled; + updating_ = false; + } + cv_.notify_all(); +} + +}}} // namespace triton::backend::python diff --git a/src/pb_bls_cancel.h b/src/pb_bls_cancel.h new file mode 100644 index 00000000..7fdd3fbf --- /dev/null +++ b/src/pb_bls_cancel.h @@ -0,0 +1,63 @@ +// Copyright 2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions +// are met: +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above copyright +// notice, this list of conditions and the following disclaimer in the +// documentation and/or other materials provided with the distribution. +// * Neither the name of NVIDIA CORPORATION nor the names of its +// contributors may be used to endorse or promote products derived +// from this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY +// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +// PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR +// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, +// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, +// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY +// OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#pragma once + +#include +#include + +#include "pb_utils.h" + +namespace triton { namespace backend { namespace python { + +class PbBLSCancel { + public: + PbBLSCancel(void* infer_playload_id) + : updating_(false), infer_playload_id_(infer_playload_id), + is_cancelled_(false) + { + } + DISALLOW_COPY_AND_ASSIGN(PbBLSCancel); + + void SaveToSharedMemory(std::unique_ptr& shm_pool); + bi::managed_external_buffer::handle_t ShmHandle(); + CancelBLSRequestMessage* ShmPayload(); + + void Cancel(); + void ReportIsCancelled(bool is_cancelled); + + private: + AllocatedSharedMemory cancel_shm_; + + std::mutex mu_; + std::condition_variable cv_; + bool updating_; + + void* infer_playload_id_; + bool is_cancelled_; +}; + +}}}; // namespace triton::backend::python diff --git a/src/pb_response_iterator.cc b/src/pb_response_iterator.cc index 9abf4997..536d4232 100644 --- a/src/pb_response_iterator.cc +++ b/src/pb_response_iterator.cc @@ -1,4 +1,4 @@ -// Copyright 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. +// Copyright 2023-2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
// // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions @@ -40,6 +40,7 @@ ResponseIterator::ResponseIterator( : id_(response->Id()), is_finished_(false), is_cleared_(false), idx_(0) { response_buffer_.push(response); + pb_bls_cancel_ = std::make_shared(response->Id()); } ResponseIterator::~ResponseIterator() @@ -159,4 +160,12 @@ ResponseIterator::GetExistingResponses() return responses; } +void +ResponseIterator::Cancel() +{ + if (!is_finished_) { + pb_bls_cancel_->Cancel(); + } +} + }}} // namespace triton::backend::python diff --git a/src/pb_response_iterator.h b/src/pb_response_iterator.h index cad5ff1f..cb26d6a3 100644 --- a/src/pb_response_iterator.h +++ b/src/pb_response_iterator.h @@ -1,4 +1,4 @@ -// Copyright 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. +// Copyright 2023-2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions @@ -29,6 +29,7 @@ #include #include "infer_response.h" +#include "pb_bls_cancel.h" namespace triton { namespace backend { namespace python { @@ -43,6 +44,7 @@ class ResponseIterator { void* Id(); void Clear(); std::vector> GetExistingResponses(); + void Cancel(); private: std::vector> responses_; @@ -53,6 +55,7 @@ class ResponseIterator { bool is_finished_; bool is_cleared_; size_t idx_; + std::shared_ptr pb_bls_cancel_; }; }}} // namespace triton::backend::python diff --git a/src/pb_stub.cc b/src/pb_stub.cc index 4e09ea1d..0a2279ec 100644 --- a/src/pb_stub.cc +++ b/src/pb_stub.cc @@ -1136,6 +1136,9 @@ Stub::ServiceStubToParentRequests() } else if ( utils_msg_payload->command_type == PYTHONSTUB_IsRequestCancelled) { SendIsCancelled(utils_msg_payload); + } else if ( + utils_msg_payload->command_type == PYTHONSTUB_CancelBLSInferRequest) { + SendCancelBLSRequest(utils_msg_payload); } else { 
std::cerr << "Error when sending message via stub_to_parent message " "buffer - unknown command\n"; @@ -1221,6 +1224,46 @@ Stub::EnqueueCleanupId(void* id, const PYTHONSTUB_CommandType& command_type) } } +void +Stub::SendCancelBLSRequest( + std::unique_ptr& utils_msg_payload) +{ + PbBLSCancel* pb_bls_cancel = + reinterpret_cast(utils_msg_payload->utils_message_ptr); + pb_bls_cancel->SaveToSharedMemory(shm_pool_); + + CancelBLSRequestMessage* message_payload = pb_bls_cancel->ShmPayload(); + std::unique_ptr ipc_message = + IPCMessage::Create(shm_pool_, false /* inline_response */); + ipc_message->Command() = utils_msg_payload->command_type; + ipc_message->Args() = pb_bls_cancel->ShmHandle(); + + bool is_cancelled = false; + { + bi::scoped_lock lk(message_payload->mu); + + SendIPCUtilsMessage(ipc_message); + while (!message_payload->waiting_on_stub) { + message_payload->cv.wait(lk); + } + + is_cancelled = message_payload->is_cancelled; + message_payload->waiting_on_stub = false; + message_payload->cv.notify_all(); + } + pb_bls_cancel->ReportIsCancelled(is_cancelled); +} + +void +Stub::EnqueueCancelBLSRequest(PbBLSCancel* pb_bls_cancel) +{ + std::unique_ptr utils_msg_payload = + std::make_unique( + PYTHONSTUB_CancelBLSInferRequest, + reinterpret_cast(pb_bls_cancel)); + EnqueueUtilsMessage(std::move(utils_msg_payload)); +} + void Stub::EnqueueIsCancelled(PbCancel* pb_cancel) { @@ -1909,7 +1952,8 @@ PYBIND11_EMBEDDED_MODULE(c_python_backend_utils, module) it.Iter(); return it; }) - .def("__next__", &ResponseIterator::Next); + .def("__next__", &ResponseIterator::Next) + .def("cancel", &ResponseIterator::Cancel); py::class_ logger(module, "Logger"); py::enum_(logger, "LogLevel") diff --git a/src/pb_stub.h b/src/pb_stub.h index 7d76ec9a..172c04a8 100644 --- a/src/pb_stub.h +++ b/src/pb_stub.h @@ -1,4 +1,4 @@ -// Copyright 2021-2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved. +// Copyright 2021-2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
// // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions @@ -321,6 +321,15 @@ class Stub { /// and the response factory for BLS decoupled response. void EnqueueCleanupId(void* id, const PYTHONSTUB_CommandType& command_type); + /// Send the id to the python backend for request address retrieval and + /// cancellation + void SendCancelBLSRequest( + std::unique_ptr& utils_msg_payload); + + /// Add infer payload id to queue. This is used for retrieving the request + /// address from the infer_payload + void EnqueueCancelBLSRequest(PbBLSCancel* pb_bls_cancel); + /// Add request cancellation query to queue void EnqueueIsCancelled(PbCancel* pb_cancel); diff --git a/src/pb_utils.h b/src/pb_utils.h index aacf6b49..1306f375 100644 --- a/src/pb_utils.h +++ b/src/pb_utils.h @@ -1,4 +1,4 @@ -// Copyright 2021-2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved. +// Copyright 2021-2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions @@ -187,6 +187,11 @@ struct CleanupMessage : SendMessageBase { void* id; }; +struct CancelBLSRequestMessage : SendMessageBase { + void* infer_payload_id; + bool is_cancelled; +}; + struct IsCancelledMessage : SendMessageBase { intptr_t response_factory_address; intptr_t request_address; diff --git a/src/python_be.cc b/src/python_be.cc index bdf7b95f..83e53852 100644 --- a/src/python_be.cc +++ b/src/python_be.cc @@ -1,4 +1,4 @@ -// Copyright 2020-2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved. +// Copyright 2020-2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
// // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions @@ -765,6 +765,10 @@ ModelInstanceState::StubToParentMQMonitor() boost::asio::post(*thread_pool_, std::move(task)); break; } + case PYTHONSTUB_CancelBLSInferRequest: { + ProcessCancelBLSRequest(message); + break; + } default: { LOG_MESSAGE( TRITONSERVER_LOG_ERROR, "Unexpected message type received."); @@ -855,6 +859,40 @@ ModelInstanceState::ProcessCleanupRequest( } } +void +ModelInstanceState::ProcessCancelBLSRequest( + const std::unique_ptr& message) +{ + AllocatedSharedMemory message_shm = + Stub()->ShmPool()->Load(message->Args()); + CancelBLSRequestMessage* message_payload = + reinterpret_cast(message_shm.data_.get()); + + { + bi::scoped_lock lk{message_payload->mu}; + + intptr_t id = reinterpret_cast(message_payload->infer_payload_id); + try { + { + std::lock_guard lock(infer_payload_mu_); + if (infer_payload_.find(id) != infer_payload_.end()) { + infer_payload_[id]->SafeCancelRequest(); + } + } + message_payload->is_cancelled = true; + } + catch (const PythonBackendException& pb_exception) { + LOG_MESSAGE(TRITONSERVER_LOG_ERROR, pb_exception.what()); + } + + message_payload->waiting_on_stub = true; + message_payload->cv.notify_all(); + while (message_payload->waiting_on_stub) { + message_payload->cv.wait(lk); + } + } +} + void ModelInstanceState::ProcessIsRequestCancelled( const std::unique_ptr& message) diff --git a/src/python_be.h b/src/python_be.h index c98e1284..6082c50b 100644 --- a/src/python_be.h +++ b/src/python_be.h @@ -1,4 +1,4 @@ -// Copyright 2022-2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved. +// Copyright 2022-2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved. 
// // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions @@ -403,6 +403,9 @@ class ModelInstanceState : public BackendModelInstance { // Process the decoupled cleanup request for InferPayload and ResponseFactory void ProcessCleanupRequest(const std::unique_ptr& message); + // Process cancelling a BLS request + void ProcessCancelBLSRequest(const std::unique_ptr& message); + // Process request cancellation query void ProcessIsRequestCancelled(const std::unique_ptr& message); diff --git a/src/request_executor.cc b/src/request_executor.cc index 3c51e626..716d3c56 100644 --- a/src/request_executor.cc +++ b/src/request_executor.cc @@ -69,9 +69,15 @@ InferRequestComplete( TRITONSERVER_InferenceRequest* request, const uint32_t flags, void* userp) { if (request != nullptr) { + RequestCompletionUserp* completion_userp = + reinterpret_cast(userp); + completion_userp->infer_payload->SetRequestAddress(0L); + LOG_IF_ERROR( TRITONSERVER_InferenceRequestDelete(request), "Failed to delete inference request."); + + delete completion_userp; } } @@ -85,10 +91,16 @@ InferResponseComplete( std::vector> output_tensors; std::shared_ptr pb_error; std::string parameters_string; + TRITONSERVER_Error_Code error_code = TRITONSERVER_ERROR_INTERNAL; if (response != nullptr) { try { - THROW_IF_TRITON_ERROR(TRITONSERVER_InferenceResponseError(response)); + TRITONSERVER_Error* server_error = + TRITONSERVER_InferenceResponseError(response); + if (server_error != nullptr) { + error_code = TRITONSERVER_ErrorCode(server_error); + } + THROW_IF_TRITON_ERROR(server_error); uint32_t output_count; THROW_IF_TRITON_ERROR( @@ -182,7 +194,7 @@ InferResponseComplete( response = nullptr; } - pb_error = std::make_shared(pb_exception.what()); + pb_error = std::make_shared(pb_exception.what(), error_code); output_tensors.clear(); } @@ -313,6 +325,18 @@ ResponseAlloc( return nullptr; // Success } +void +InferRequestCancel(intptr_t 
request_address) +{ + if (request_address == 0L) { + return; + } + + TRITONSERVER_InferenceRequest* irequest = + reinterpret_cast(request_address); + THROW_IF_TRITON_ERROR(TRITONSERVER_InferenceRequestCancel(irequest)); +} + TRITONSERVER_Error* OutputBufferQuery( TRITONSERVER_ResponseAllocator* allocator, void* userp, @@ -355,6 +379,7 @@ RequestExecutor::Infer( bool is_ready = false; const char* model_name = infer_request->ModelName().c_str(); TRITONSERVER_InferenceRequest* irequest = nullptr; + RequestCompletionUserp* completion_userp = nullptr; try { int64_t model_version = infer_request->ModelVersion(); @@ -406,8 +431,10 @@ RequestExecutor::Infer( THROW_IF_TRITON_ERROR(TRITONSERVER_InferenceRequestSetTimeoutMicroseconds( irequest, infer_request->Timeout())); + completion_userp = new RequestCompletionUserp(infer_payload); THROW_IF_TRITON_ERROR(TRITONSERVER_InferenceRequestSetReleaseCallback( - irequest, InferRequestComplete, nullptr /* request_release_userp */)); + irequest, InferRequestComplete, + reinterpret_cast(completion_userp))); TRITONSERVER_InferenceTrace* trace = nullptr; if (infer_request->GetTrace().TritonTrace() != nullptr) { @@ -476,11 +503,21 @@ RequestExecutor::Infer( reinterpret_cast(infer_payload->ResponseAllocUserp().get()), InferResponseComplete, reinterpret_cast(infer_payload.get()))); + // Store the inference request address submitted to the Triton server for + // retrieval + infer_payload->SetRequestAddress(reinterpret_cast(irequest)); + infer_payload->SetRequestCancellationFunc(InferRequestCancel); + THROW_IF_TRITON_ERROR( TRITONSERVER_ServerInferAsync(server_, irequest, trace)); } } catch (const PythonBackendException& pb_exception) { + infer_payload->SetRequestAddress(0L); + if (completion_userp != nullptr) { + delete completion_userp; + } + LOG_IF_ERROR( TRITONSERVER_InferenceRequestDelete(irequest), "Failed to delete inference request."); diff --git a/src/request_executor.h b/src/request_executor.h index 1c5eb1fa..07562d6a 100644 --- 
a/src/request_executor.h +++ b/src/request_executor.h @@ -1,4 +1,4 @@ -// Copyright 2021-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved. +// Copyright 2021-2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions @@ -37,6 +37,12 @@ namespace triton { namespace backend { namespace python { TRITONSERVER_Error* CreateTritonErrorFromException( const PythonBackendException& pb_exception); +struct RequestCompletionUserp { + std::shared_ptr infer_payload; + RequestCompletionUserp(std::shared_ptr& infer_payload) + : infer_payload(infer_payload){}; +}; + class RequestExecutor { TRITONSERVER_ResponseAllocator* response_allocator_ = nullptr; TRITONSERVER_Server* server_;