Commit c40a3f5

Removing obsolete TensorFlow test scenarios. (#8450)
1 parent be7d4b1 commit c40a3f5

File tree

2 files changed: +15, -66 lines changed

qa/L0_perf_deeprecommender/test.sh

Lines changed: 3 additions & 18 deletions
@@ -1,5 +1,5 @@
 #!/bin/bash
-# Copyright 2019-2024, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
+# Copyright 2019-2025, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
 #
 # Redistribution and use in source and binary forms, with or without
 # modification, are permitted provided that the following conditions
@@ -88,24 +88,9 @@ if [ $? -ne 0 ]; then
 fi
 rm tensorrt_models/deeprecommender_plan/model.onnx

-OPTIMIZED_MODEL_NAMES="deeprecommender_graphdef_trt"
-
-# Create optimized models (TF-TRT and ONNX-TRT)
-rm -fr optimized_model_store && mkdir optimized_model_store
-for MODEL_NAME in $OPTIMIZED_MODEL_NAMES; do
-    BASE_MODEL=$(echo ${MODEL_NAME} | cut -d '_' -f 1,2)
-    cp -r $REPODIR/perf_model_store/${BASE_MODEL} optimized_model_store/${MODEL_NAME}
-    CONFIG_PATH="optimized_model_store/${MODEL_NAME}/config.pbtxt"
-    sed -i "s/^name: \"${BASE_MODEL}\"/name: \"${MODEL_NAME}\"/" ${CONFIG_PATH}
-    echo "optimization { execution_accelerators {" >> ${CONFIG_PATH}
-    echo "gpu_execution_accelerator : [ {" >> ${CONFIG_PATH}
-    echo "name : \"tensorrt\" " >> ${CONFIG_PATH}
-    echo "} ]" >> ${CONFIG_PATH}
-    echo "}}" >> ${CONFIG_PATH}
-done

 # Tests with each model
-for FRAMEWORK in graphdef plan graphdef_trt onnx libtorch; do
+for FRAMEWORK in plan onnx libtorch; do
     MODEL_NAME=${MODEL}_${FRAMEWORK}
     if [ "$FRAMEWORK" == "plan" ]; then
         REPO=`pwd`/tensorrt_models
@@ -173,7 +158,7 @@ fi
 rm tensorrt_models/deeprecommender_plan/model.onnx

 # Tests with each model
-for FRAMEWORK in graphdef plan graphdef_trt onnx libtorch; do
+for FRAMEWORK in plan onnx libtorch; do
     MODEL_NAME=${MODEL}_${FRAMEWORK}
     if [ "$FRAMEWORK" == "plan" ]; then
         REPO=`pwd`/tensorrt_models
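
The change above trims the deeprecommender run to the plan, onnx, and libtorch backends. A minimal stand-alone sketch of the model names the trimmed loop now produces, assuming MODEL is set to deeprecommender earlier in the script (that assignment is outside this diff):

# Sketch only; in the real test.sh MODEL is defined elsewhere.
MODEL="deeprecommender"
for FRAMEWORK in plan onnx libtorch; do
    MODEL_NAME=${MODEL}_${FRAMEWORK}
    echo "${MODEL_NAME}"   # deeprecommender_plan, deeprecommender_onnx, deeprecommender_libtorch
done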

qa/L0_perf_resnet/test.sh

Lines changed: 12 additions & 48 deletions
@@ -43,15 +43,12 @@ rm -f *.log *.csv *.tjson *.json
 PROTOCOLS="grpc http triton_c_api"

 TRT_MODEL_NAME="resnet50_fp32_plan"
-TF_MODEL_NAME="resnet50v1.5_fp16_savedmodel"
 PYT_MODEL_NAME="resnet50_fp32_libtorch"
 ONNX_MODEL_NAME="resnet50_fp32_onnx"

 # The base model name should be the prefix to the
 # respective optimized model name.
-TFTRT_MODEL_NAME="resnet50v1.5_fp16_savedmodel_trt"
 ONNXTRT_MODEL_NAME="resnet50_fp32_onnx_trt"
-TFAMP_MODEL_NAME="resnet50v1.5_fp16_savedmodel_amp"

 ARCH=${ARCH:="x86_64"}
 REPODIR=${REPODIR:="/data/inferenceserver/${REPO_VERSION}"}
@@ -67,15 +64,10 @@ STATIC_BATCH=1
 INSTANCE_CNT=1
 CONCURRENCY=1

-MODEL_NAMES="${TRT_MODEL_NAME} ${TF_MODEL_NAME} ${ONNX_MODEL_NAME} ${PYT_MODEL_NAME}"
+MODEL_NAMES="${TRT_MODEL_NAME} ${ONNX_MODEL_NAME} ${PYT_MODEL_NAME}"
+
+OPTIMIZED_MODEL_NAMES="${ONNXTRT_MODEL_NAME}"

-# Disable TF-TRT test on Jetson due to Segfault
-# Disable ORT-TRT test on Jetson due to support being disabled
-if [ "$ARCH" == "aarch64" ]; then
-    OPTIMIZED_MODEL_NAMES="${TFAMP_MODEL_NAME}"
-else
-    OPTIMIZED_MODEL_NAMES="${TFTRT_MODEL_NAME} ${TFAMP_MODEL_NAME} ${ONNXTRT_MODEL_NAME}"
-fi

 # Create optimized models
 rm -fr optimized_model_store && mkdir optimized_model_store
@@ -86,21 +78,15 @@ for MODEL_NAME in $OPTIMIZED_MODEL_NAMES; do
     sed -i "s/^name: \"${BASE_MODEL}\"/name: \"${MODEL_NAME}\"/" ${CONFIG_PATH}
     echo "optimization { execution_accelerators {" >> ${CONFIG_PATH}
     echo "gpu_execution_accelerator : [ {" >> ${CONFIG_PATH}
-    if [ "${MODEL_NAME}" = "${TFAMP_MODEL_NAME}" ] ; then
-        echo "name : \"auto_mixed_precision\" " >> ${CONFIG_PATH}
-    else
-        echo "name : \"tensorrt\" " >> ${CONFIG_PATH}
-        if [ "${MODEL_NAME}" = "${TFTRT_MODEL_NAME}" ] ; then
-            echo "parameters { key: \"precision_mode\" value: \"FP16\" }" >> ${CONFIG_PATH}
-        fi
-
-        if [ "${MODEL_NAME}" = "${ONNXTRT_MODEL_NAME}" ] ; then
-            echo "parameters { key: \"precision_mode\" value: \"FP16\" }" >> ${CONFIG_PATH}
-            echo "parameters { key: \"max_workspace_size_bytes\" value: \"1073741824\" }" >> ${CONFIG_PATH}
-            echo "parameters { key: \"trt_engine_cache_enable\" value: \"1\" }" >> ${CONFIG_PATH}
-            echo "parameters { key: \"trt_engine_cache_path\" value: \"${CACHE_PATH}\" } " >> ${CONFIG_PATH}
-        fi
+    echo "name : \"tensorrt\" " >> ${CONFIG_PATH}
+
+    if [ "${MODEL_NAME}" = "${ONNXTRT_MODEL_NAME}" ] ; then
+        echo "parameters { key: \"precision_mode\" value: \"FP16\" }" >> ${CONFIG_PATH}
+        echo "parameters { key: \"max_workspace_size_bytes\" value: \"1073741824\" }" >> ${CONFIG_PATH}
+        echo "parameters { key: \"trt_engine_cache_enable\" value: \"1\" }" >> ${CONFIG_PATH}
+        echo "parameters { key: \"trt_engine_cache_path\" value: \"${CACHE_PATH}\" } " >> ${CONFIG_PATH}
     fi
+
     echo "} ]" >> ${CONFIG_PATH}
     echo "}}" >> ${CONFIG_PATH}
 done
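
The surviving echo lines append a TensorRT execution-accelerator block to each optimized model's config.pbtxt. Below is a rough heredoc sketch of the equivalent result for the ONNX-TRT model; the model-store path and the CACHE_PATH default are illustrative assumptions, since both are set outside this hunk.

# Sketch only: the equivalent of the echo sequence above for the ONNX-TRT model.
CONFIG_PATH="optimized_model_store/resnet50_fp32_onnx_trt/config.pbtxt"   # assumed layout
CACHE_PATH="${CACHE_PATH:-/tmp/trt_cache}"                                # assumed default
mkdir -p "$(dirname ${CONFIG_PATH})"
cat >> ${CONFIG_PATH} <<EOF
optimization { execution_accelerators {
gpu_execution_accelerator : [ {
name : "tensorrt"
parameters { key: "precision_mode" value: "FP16" }
parameters { key: "max_workspace_size_bytes" value: "1073741824" }
parameters { key: "trt_engine_cache_enable" value: "1" }
parameters { key: "trt_engine_cache_path" value: "${CACHE_PATH}" }
} ]
}}
EOF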
@@ -213,26 +199,4 @@ for MODEL_NAME in $OPTIMIZED_MODEL_NAMES; do
         ARCH=${ARCH} \
         bash -x run_test.sh
     done
-done
-
-# FIXME Disable the following due to
-# https://jirasw.nvidia.com/browse/DLIS-2933.
-#
-# Needs this additional test configuration for comparing against TFS.
-if [ "$ARCH" == "x86_64" ]; then
-    MODEL_NAME=${TF_MODEL_NAME}
-    REPO=$REPODIR/perf_model_store
-    STATIC_BATCH=128
-    INSTANCE_CNT=1
-    CONCURRENCY=1
-    FRAMEWORK=$(echo ${MODEL_NAME} | cut -d '_' -f 3)
-    MODEL_NAME=${MODEL_NAME} \
-    MODEL_FRAMEWORK=${FRAMEWORK} \
-    MODEL_PATH="$REPO/${MODEL_NAME}" \
-    STATIC_BATCH=${STATIC_BATCH} \
-    PERF_CLIENT_PROTOCOL="grpc" \
-    INSTANCE_CNT=${INSTANCE_CNT} \
-    CONCURRENCY=${CONCURRENCY} \
-    ARCH=${ARCH} \
-    bash -x run_test.sh
-fi
+done
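
The removed tail was the TF SavedModel comparison run against TFS, which had already been disabled by the FIXME above. The per-model pattern that remains still hands its configuration to run_test.sh through environment variables, roughly as in this sketch; the MODEL_FRAMEWORK and MODEL_PATH values shown are assumptions for illustration, while the other values mirror the defaults visible earlier in the script.

# Sketch of how each remaining iteration drives run_test.sh via environment variables.
ONNXTRT_MODEL_NAME="resnet50_fp32_onnx_trt"
STATIC_BATCH=1
INSTANCE_CNT=1
CONCURRENCY=1
ARCH="x86_64"

MODEL_NAME=${ONNXTRT_MODEL_NAME} \
MODEL_FRAMEWORK="onnx" \
MODEL_PATH="$(pwd)/optimized_model_store/${ONNXTRT_MODEL_NAME}" \
STATIC_BATCH=${STATIC_BATCH} \
PERF_CLIENT_PROTOCOL="grpc" \
INSTANCE_CNT=${INSTANCE_CNT} \
CONCURRENCY=${CONCURRENCY} \
ARCH=${ARCH} \
bash -x run_test.sh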
