
Commit 13cb6f0

test: Fix tests for ubuntu 24.04 upgrade (#7791)
1 parent 75a97f4 commit 13cb6f0

File tree

6 files changed: +15 additions, -15 deletions


qa/L0_dlpack_multi_gpu/test.sh

Lines changed: 1 addition & 1 deletion

@@ -41,7 +41,7 @@ source ../common/util.sh
 
 # Uninstall the non CUDA version of PyTorch
 pip3 uninstall -y torch
-pip3 install torch==1.13.0+cu117 -f https://download.pytorch.org/whl/torch_stable.html
+pip3 install torch==2.3.1+cu118 -f https://download.pytorch.org/whl/torch_stable.html
 pip3 install tensorflow
 
 # Install CuPy for testing non_blocking compute streams
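
This commit moves the CUDA test scripts from torch 1.13.0+cu117 to torch 2.3.1+cu118. A minimal post-install sanity check (an illustrative sketch, not part of the test scripts; the expected strings are simply the versions pinned above):

    # Verify the intended torch build was installed (illustrative only).
    import torch

    print(torch.__version__)          # expected: 2.3.1+cu118
    print(torch.version.cuda)         # expected: 11.8
    print(torch.cuda.is_available())  # True only with a visible GPU and driver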

qa/L0_infer/test.sh

Lines changed: 4 additions & 4 deletions

@@ -144,9 +144,9 @@ export BATCH
 
 if [[ $BACKENDS == *"python_dlpack"* ]]; then
     if [[ "aarch64" != $(uname -m) ]] ; then
-        pip3 install torch==2.2.0+cpu -f https://download.pytorch.org/whl/torch_stable.html
+        pip3 install torch==2.3.1+cpu -f https://download.pytorch.org/whl/torch_stable.html
     else
-        pip3 install torch==2.2.0 -f https://download.pytorch.org/whl/torch_stable.html
+        pip3 install torch==2.3.1 -f https://download.pytorch.org/whl/torch_stable.html
     fi
 fi

@@ -352,9 +352,9 @@ if [ "$TEST_VALGRIND" -eq 1 ]; then
     TESTING_BACKENDS="python python_dlpack onnx"
     EXPECTED_NUM_TESTS=42
     if [[ "aarch64" != $(uname -m) ]] ; then
-        pip3 install torch==1.13.0+cpu -f https://download.pytorch.org/whl/torch_stable.html
+        pip3 install torch==2.3.1+cpu -f https://download.pytorch.org/whl/torch_stable.html
     else
-        pip3 install torch==1.13.0 -f https://download.pytorch.org/whl/torch_stable.html
+        pip3 install torch==2.3.1 -f https://download.pytorch.org/whl/torch_stable.html
     fi
 
     for BACKENDS in $TESTING_BACKENDS; do
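
The aarch64 branch installs the bare torch==2.3.1 because, to our knowledge, the "+cpu" local-version wheels on the torch_stable index are published for x86_64 only; aarch64 wheels are CPU-only by default. A hypothetical Python helper mirroring the shell conditional above:

    # Hypothetical helper; mirrors the uname -m check in the script above.
    import platform

    def torch_requirement(version: str = "2.3.1") -> str:
        # aarch64 wheels carry no "+cpu" suffix on the torch_stable index.
        if platform.machine() == "aarch64":
            return f"torch=={version}"
        return f"torch=={version}+cpu"

    print(torch_requirement())  # -> "torch==2.3.1+cpu" on x86_64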

qa/L0_io/test.sh

Lines changed: 2 additions & 2 deletions

@@ -1,5 +1,5 @@
 #!/bin/bash
-# Copyright 2019-2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+# Copyright 2019-2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 #
 # Redistribution and use in source and binary forms, with or without
 # modification, are permitted provided that the following conditions

@@ -54,7 +54,7 @@ LD_LIBRARY_PATH=/opt/tritonserver/lib:$LD_LIBRARY_PATH
 rm -f $CLIENT_LOG*
 
 # PyTorch is required for the Python backend dlpack add sub models
-pip3 install torch==1.13.0+cu117 -f https://download.pytorch.org/whl/torch_stable.html
+pip3 install torch==2.3.1+cu118 -f https://download.pytorch.org/whl/torch_stable.html
 RET=0
 
 # Prepare float32 models with basic config

qa/L0_libtorch_instance_group_kind_model/test.sh

Lines changed: 2 additions & 2 deletions

@@ -1,5 +1,5 @@
 #!/bin/bash
-# Copyright 2023, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+# Copyright 2023-2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 #
 # Redistribution and use in source and binary forms, with or without
 # modification, are permitted provided that the following conditions

@@ -39,7 +39,7 @@ if [ ! -z "$TEST_REPO_ARCH" ]; then
 fi
 
 pip3 uninstall -y torch
-pip3 install torch==1.13.0+cu117 -f https://download.pytorch.org/whl/torch_stable.html
+pip3 install torch==2.3.1+cu118 -f https://download.pytorch.org/whl/torch_stable.html
 
 DATADIR=/data/inferenceserver/${REPO_VERSION}/qa_model_repository
 SERVER=/opt/tritonserver/bin/tritonserver

qa/L0_trace/opentelemetry_unittest.py

Lines changed: 5 additions & 5 deletions

@@ -221,7 +221,7 @@ def _check_events(self, span_name, events, is_cancelled):
             self.assertFalse(
                 all(entry in events for entry in root_events_http + root_events_grpc)
             )
-            self.assertEquals(len(events), len(compute_events))
+            self.assertEqual(len(events), len(compute_events))
 
         elif span_name == self.root_span:
             # Check that root span has INFER_RESPONSE_COMPLETE, _RECV/_WAITREAD

@@ -233,12 +233,12 @@ def _check_events(self, span_name, events, is_cancelled):
             if "HTTP" in events:
                 self.assertTrue(all(entry in events for entry in root_events_http))
                 self.assertFalse(all(entry in events for entry in root_events_grpc))
-                self.assertEquals(len(events), len(root_events_http))
+                self.assertEqual(len(events), len(root_events_http))
 
             elif "GRPC" in events:
                 self.assertTrue(all(entry in events for entry in root_events_grpc))
                 self.assertFalse(all(entry in events for entry in root_events_http))
-                self.assertEquals(len(events), len(root_events_grpc))
+                self.assertEqual(len(events), len(root_events_grpc))
 
             if is_cancelled == False:
                 self.assertFalse(all(entry in events for entry in request_events))

@@ -254,7 +254,7 @@ def _check_events(self, span_name, events, is_cancelled):
                 all(entry in events for entry in root_events_http + root_events_grpc)
             )
             self.assertFalse(all(entry in events for entry in compute_events))
-            self.assertEquals(len(events), len(request_events))
+            self.assertEqual(len(events), len(request_events))
 
         elif span_name.startswith("CUSTOM_ACTIVITY"):
             custom_activity_events = []

@@ -276,7 +276,7 @@ def _check_events(self, span_name, events, is_cancelled):
                 all(entry in events for entry in custom_activity_events),
                 "Span " + span_name,
             )
-            self.assertEquals(
+            self.assertEqual(
                 len(events), len(custom_activity_events), "Span " + span_name
             )
 
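
The assertEquals calls are renamed because assertEquals has been a deprecated alias of assertEqual since Python 3.2 and is removed in Python 3.12, the default interpreter on Ubuntu 24.04, so the old spelling now raises AttributeError. A minimal illustration (not from the repo):

    # assertEquals is gone in Python 3.12; use assertEqual.
    import unittest

    class ExampleTest(unittest.TestCase):
        def test_lengths_match(self):
            events = ["compute_start", "compute_end"]
            expected = ["compute_start", "compute_end"]
            self.assertEqual(len(events), len(expected))

    if __name__ == "__main__":
        unittest.main()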

qa/L0_warmup/test.sh

Lines changed: 1 addition & 1 deletion

@@ -415,7 +415,7 @@ wait $SERVER_PID
 # Test the onnx model to verify that the memory type of the output tensor
 # remains unchanged with the warmup setting
 pip3 uninstall -y torch
-pip3 install torch==1.13.0+cu117 -f https://download.pytorch.org/whl/torch_stable.html
+pip3 install torch==2.3.1+cu118 -f https://download.pytorch.org/whl/torch_stable.html
 
 rm -fr models && mkdir models
 cp -r /data/inferenceserver/${REPO_VERSION}/qa_model_repository/onnx_nobatch_float32_float32_float32 models/.
