 sys.path.append("../../common")

 import queue
+import threading
 import time
 import unittest
 from functools import partial
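The tests below rely on the `UserData` and `callback` stream helpers imported from `../../common`. As a point of reference, a minimal sketch of that queue-based pattern (a reconstruction for illustration, not the actual helper module):

```python
import queue


class UserData:
    def __init__(self):
        # The stream callback deposits each result or error here
        self._completed_requests = queue.Queue()


def callback(user_data, result, error):
    # Queue the InferenceServerException on error, otherwise the InferResult
    if error:
        user_data._completed_requests.put(error)
    else:
        user_data._completed_requests.put(result)
```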
@@ -241,6 +242,135 @@ def test_infer_pymodel_error(self):
             initial_metrics_value,
         )

+    # Test grpc stream behavior when triton_grpc_error is set to true.
+    # The stream is expected to close and return a gRPC error when the
+    # model returns an error.
+    def test_triton_grpc_error_error_on(self):
+        model_name = "execute_grpc_error"
+        shape = [2, 2]
+        number_of_requests = 2
+        user_data = UserData()
+        triton_client = grpcclient.InferenceServerClient(
+            f"{_tritonserver_ipaddr}:8001"
+        )
+        metadata = {"triton_grpc_error": "true"}
+        triton_client.start_stream(
+            callback=partial(callback, user_data), headers=metadata
+        )
+        stream_end = False
+        for i in range(number_of_requests):
+            input_data = np.random.randn(*shape).astype(np.float32)
+            inputs = [
+                grpcclient.InferInput(
+                    "IN", input_data.shape, np_to_triton_dtype(input_data.dtype)
+                )
+            ]
+            inputs[0].set_data_from_numpy(input_data)
+            try:
+                triton_client.async_stream_infer(model_name=model_name, inputs=inputs)
+                result = user_data._completed_requests.get()
+                if isinstance(result, InferenceServerException):
+                    # execute_grpc_error intentionally returns an error with
+                    # StatusCode.INTERNAL on the second request (see the model
+                    # sketch after this test)
+                    self.assertEqual(str(result.status()), "StatusCode.INTERNAL")
+                    stream_end = True
+                else:
+                    # Stream is still open; the response must carry "OUT"
+                    output_data = result.as_numpy("OUT")
+                    self.assertIsNotNone(output_data, "error: expected 'OUT'")
+            except Exception:
+                # After the error arrives the stream is expected to be closed;
+                # an exception before that point means the stream was killed
+                # without an error from CORE
+                if not stream_end:
+                    self.fail("Unexpected: stream killed without error from CORE")
+
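For reference, a minimal sketch of what the `execute_grpc_error` Python-backend model might look like: it echoes `IN` to `OUT` and deliberately fails on the second request. The per-instance counter and the error message are hypothetical; the real model lives in the test model repository.

```python
import triton_python_backend_utils as pb_utils


class TritonPythonModel:
    def initialize(self, args):
        # Hypothetical per-instance counter used to fail on the 2nd request
        self.inference_count = 0

    def execute(self, requests):
        responses = []
        for request in requests:
            self.inference_count += 1
            if self.inference_count == 2:
                # A returned error surfaces on the stream as StatusCode.INTERNAL
                responses.append(
                    pb_utils.InferenceResponse(
                        error=pb_utils.TritonError("intentional failure")
                    )
                )
            else:
                # Echo the input tensor back as "OUT"
                in_tensor = pb_utils.get_input_tensor_by_name(request, "IN")
                out_tensor = pb_utils.Tensor("OUT", in_tensor.as_numpy())
                responses.append(
                    pb_utils.InferenceResponse(output_tensors=[out_tensor])
                )
        return responses
```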
+    # Test grpc stream behavior when triton_grpc_error is set to true in
+    # multiple open streams. Each stream is expected to close and return a
+    # gRPC error when the model returns an error.
+    def test_triton_grpc_error_multithreaded(self):
+        thread1 = threading.Thread(target=self.test_triton_grpc_error_error_on)
+        thread2 = threading.Thread(target=self.test_triton_grpc_error_error_on)
+        # Start the threads so both streams are open concurrently
+        thread1.start()
+        thread2.start()
+        # Wait for both threads to finish
+        thread1.join()
+        thread2.join()
+
+    # Test grpc stream behavior when triton_grpc_error is set to true and the
+    # stream is subsequently cancelled. Cancellation is expected to succeed
+    # without raising an exception.
+    def test_triton_grpc_error_cancel(self):
+        model_name = "execute_grpc_error"
+        shape = [2, 2]
+        number_of_requests = 1
+        user_data = UserData()
+        stream_end = False
+        triton_client = grpcclient.InferenceServerClient(
+            f"{_tritonserver_ipaddr}:8001"
+        )
+
+        metadata = {"triton_grpc_error": "true"}
+
+        triton_client.start_stream(
+            callback=partial(callback, user_data), headers=metadata
+        )
+
+        for i in range(number_of_requests):
+            input_data = np.random.randn(*shape).astype(np.float32)
+            inputs = [
+                grpcclient.InferInput(
+                    "IN", input_data.shape, np_to_triton_dtype(input_data.dtype)
+                )
+            ]
+            inputs[0].set_data_from_numpy(input_data)
+            try:
+                triton_client.async_stream_infer(model_name=model_name, inputs=inputs)
+                result = user_data._completed_requests.get()
+                if isinstance(result, InferenceServerException):
+                    stream_end = True
+                if i == 0:
+                    # Cancel the stream while it is still open (see the note
+                    # after this test)
+                    triton_client.stop_stream(cancel_requests=True)
+            except Exception:
+                # Cancellation should not raise; an exception without a prior
+                # error from CORE means the stream was killed unexpectedly
+                if not stream_end:
+                    self.fail("Unexpected: stream killed without error from CORE")
+
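If the callback does deliver a result after cancellation, a caller can tell a client-side cancel apart from a model error by the status code. A hedged sketch, assuming the exception exposes `status()` as used in the tests above and that cancelled requests surface as `StatusCode.CANCELLED`:

```python
from tritonclient.utils import InferenceServerException


def classify_stream_result(result):
    # Distinguish a client-side cancel from a model error (hypothetical helper)
    if isinstance(result, InferenceServerException):
        if str(result.status()) == "StatusCode.CANCELLED":
            return "cancelled"  # request was cancelled by this client
        if str(result.status()) == "StatusCode.INTERNAL":
            return "model-error"  # model returned an error through CORE
        return "other-error"
    return "success"
```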
+    # Test grpc stream behavior when triton_grpc_error is set to false:
+    # the stream is NOT closed when an error is reported from CORE.
+    def test_triton_grpc_error_error_off(self):
+        model_name = "execute_grpc_error"
+        shape = [2, 2]
+        number_of_requests = 4
+        response_counter = 0
+        user_data = UserData()
+        triton_client = grpcclient.InferenceServerClient(
+            f"{_tritonserver_ipaddr}:8001"
+        )
+        triton_client.start_stream(callback=partial(callback, user_data))
+        for i in range(number_of_requests):
+            input_data = np.random.randn(*shape).astype(np.float32)
+            inputs = [
+                grpcclient.InferInput(
+                    "IN", input_data.shape, np_to_triton_dtype(input_data.dtype)
+                )
+            ]
+            inputs[0].set_data_from_numpy(input_data)
+            triton_client.async_stream_infer(model_name=model_name, inputs=inputs)
+            _ = user_data._completed_requests.get()
+            response_counter += 1
+        # response_counter == number_of_requests indicates that the stream did
+        # NOT close after the first reported gRPC error, because the client is
+        # not in triton_grpc_error mode
+        self.assertEqual(response_counter, number_of_requests)
+
 if __name__ == "__main__":
     unittest.main()