-// Copyright 2022-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+// Copyright 2022-2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 //
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions
@@ -226,6 +226,11 @@ PbMemory::LoadFromSharedMemory(
   MemoryShm* memory_shm_ptr = reinterpret_cast<MemoryShm*>(data_shm);
   char* memory_data_shm = data_shm + sizeof(MemoryShm);
 
+  if (memory_data_shm + memory_shm_ptr->byte_size >
+      (char*)shm_pool->GetBaseAddress() + shm_pool->GetCurrentCapacity()) {
+    throw PythonBackendException("Attempted to access out of bounds memory.");
+  }
+
   char* data_ptr = nullptr;
   bool opened_cuda_ipc_handle = false;
   if (memory_shm_ptr->memory_type == TRITONSERVER_MEMORY_GPU &&
@@ -275,6 +280,11 @@ PbMemory::LoadFromSharedMemory(
       reinterpret_cast<MemoryShm*>(memory_shm.data_.get());
   char* memory_data_shm = memory_shm.data_.get() + sizeof(MemoryShm);
 
+  if (memory_data_shm + memory_shm_ptr->byte_size >
+      (char*)shm_pool->GetBaseAddress() + shm_pool->GetCurrentCapacity()) {
+    throw PythonBackendException("Attempted to access out of bounds memory.");
+  }
+
   char* data_ptr = nullptr;
   bool opened_cuda_ipc_handle = false;
   if (memory_shm_ptr->memory_type == TRITONSERVER_MEMORY_GPU) {
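
Both hunks add the same guard to PbMemory::LoadFromSharedMemory: before the payload that follows the MemoryShm header is used, the byte_size read back from shared memory is checked against the end of the mapped pool (shm_pool->GetBaseAddress() + shm_pool->GetCurrentCapacity()), and a PythonBackendException is thrown if the region would extend past it. Below is a minimal, self-contained sketch of that kind of bounds check; ShmRegionView and CheckWithinBounds are hypothetical names used only for illustration, and the overflow-safe comparison (testing byte_size against the remaining space instead of forming data + byte_size) is one possible variant rather than the exact expression in the diff.

#include <cstddef>
#include <stdexcept>

// Hypothetical view of a mapped shared-memory pool: its base address and
// current capacity, mirroring what GetBaseAddress()/GetCurrentCapacity()
// provide in the diff above.
struct ShmRegionView {
  char* base;            // start of the mapped region
  std::size_t capacity;  // current size of the region in bytes
};

// Throws if [data, data + byte_size) does not lie entirely inside the region.
inline void
CheckWithinBounds(
    const ShmRegionView& region, const char* data, std::size_t byte_size)
{
  const char* region_end = region.base + region.capacity;
  if (data < region.base || data > region_end ||
      byte_size > static_cast<std::size_t>(region_end - data)) {
    throw std::runtime_error("Attempted to access out of bounds memory.");
  }
}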