# It is recommended to execute this script inside the Model Optimization Toolkit TensorRT Docker container.
# Please ensure that the ImageNet dataset is available in the container at the specified path.

-# Usage: ./test_onnx_ptq.sh [--no-clean] [--eval] [/path/to/imagenet] [/path/to/models]
+# Usage: ./test_onnx_ptq.sh [--no-clean] [--eval] [/path/to/imagenet] [/path/to/models] [/path/to/timing_cache]

set -exo pipefail

@@ -40,6 +40,7 @@ clean_mode=true
eval_mode=false
imagenet_path=""
models_folder=""
+timing_cache_path=""

for arg in "$@"; do
    case $arg in
@@ -56,6 +57,8 @@ for arg in "$@"; do
            imagenet_path="$arg"
        elif [ -z "$models_folder" ]; then
            models_folder="$arg"
+        elif [ -z "$timing_cache_path" ]; then
+            timing_cache_path="$arg"
        fi
        shift
        ;;
@@ -68,6 +71,7 @@ export TQDM_DISABLE=1
# Setting image and model paths (contains 8 models)
imagenet_path=${imagenet_path:-/data/imagenet/}
models_folder=${models_folder:-/models/onnx}
+timing_cache_path=${timing_cache_path:-/build/timing.cache}
calib_size=1
eval_size=100
batch_size=1
@@ -192,7 +196,7 @@ if [ "$eval_mode" = true ]; then
        --model_name="${timm_model_name[$model_name]}" \
        --engine_precision=$precision \
        --results_path=$model_dir/$quant_mode/${model_name}_${quant_mode}.csv \
-        --timing_cache_path=build/timing.cache
+        --timing_cache_path=$timing_cache_path
    else
        python evaluate.py \
            --onnx_path=$eval_model_path \
@@ -203,7 +207,7 @@ if [ "$eval_mode" = true ]; then
            --model_name="${timm_model_name[$model_name]}" \
            --engine_precision=$precision \
            --results_path=$model_dir/$quant_mode/${model_name}_${quant_mode}.csv \
-            --timing_cache_path=build/timing.cache
+            --timing_cache_path=$timing_cache_path
    fi
done
