-
Notifications
You must be signed in to change notification settings - Fork 1
Expand file tree
/
Copy path benchmark-tflite.sh
More file actions
executable file
·37 lines (28 loc) · 1.9 KB
/
benchmark-tflite.sh
File metadata and controls
executable file
·37 lines (28 loc) · 1.9 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
#!/bin/bash
# Copyright (c) 2026 Qualcomm Technologies, Inc. All rights reserved.
#
# Container-side setup for TFLite benchmarking: create the library symlinks
# the benchmark binaries expect before running benchmark_model over models.

# tflite hardcodes the bare "libOpenCL.so" name instead of properly linking
# against the versioned .so.X soname, so point the unversioned name at the
# real library. Fail loudly if this cannot be done (e.g. not running as root)
# rather than discovering it mid-benchmark.
ln -sf /usr/lib/aarch64-linux-gnu/libOpenCL.so.1 /usr/lib/aarch64-linux-gnu/libOpenCL.so \
  || { echo "error: failed to create libOpenCL.so symlink" >&2; exit 1; }

# Expose the GPU delegate on the default library search path so EIM files
# can load it.
ln -sf /root/tensorflow/lite/delegates/gpu/libtensorflowlite_gpu_delegate.so /lib/aarch64-linux-gnu/ \
  || { echo "error: failed to create GPU delegate symlink" >&2; exit 1; }
# For CPUFreq to use the performance governor, run outside the container
echo "Run the following outside the container to have the CPUs run at full tilt:"
echo 'for i in /sys/devices/system/cpu/cpu*/cpufreq/scaling_governor ; do echo "performance" > $i ; done'
echo
# GPU hangcheck, also run this outside the container
echo "Extend the GPU hangcheck timer to avoid some models timing out:"
echo 'echo 6000 > /sys/kernel/debug/dri/0/hangcheck_period_ms'
echo
# Give the user time to run the host-side commands above before benchmarking
# starts. Single source of truth for the duration: the original message
# claimed 30 seconds while actually sleeping only 10.
pause_seconds=10
echo "Pausing for ${pause_seconds} seconds so you can do the above"
sleep "${pause_seconds}"
# This expects models to be present, but we can't distribute them inside the
# container, so bind mount them using e.g.
#   docker run --volume /path/to/local/models:/root/models <..>
cd /root/tensorflow/lite/tools/benchmark/ \
  || { echo "error: benchmark directory missing" >&2; exit 1; }
set -x
# NUL-delimited find/read so model paths containing whitespace survive intact
# (a plain "for model in $(find ...)" would word-split on spaces).
while IFS= read -r -d '' model ; do
  # Op-profiling variants, kept for reference:
  # ./benchmark_model --graph="${model}" --enable_op_profiling=true --use_xnnpack=true --num_threads="$(nproc)" --max_sec=300 --profiling_output_csv_file="${model}-gpu.csv" --use_gpu=true |& tee "${model}-gpu-log.txt"
  # ./benchmark_model --graph="${model}" --enable_op_profiling=true --use_xnnpack=true --num_threads="$(nproc)" --max_sec=300 --profiling_output_csv_file="${model}-cpu.csv" --use_gpu=false |& tee "${model}-cpu-log.txt"
  ./benchmark_model --graph="${model}" --num_threads="$(nproc)" --use_gpu=true |& tee "${model}-gpu-log.txt"
  # ${PIPESTATUS[0]} is benchmark_model's exit status; a plain $? here would
  # report tee's status (almost always 0), hiding benchmark failures.
  echo "Exit code: " "${PIPESTATUS[0]}" >> "${model}-gpu-log.txt"
  ./benchmark_model --graph="${model}" --num_threads="$(nproc)" --use_gpu=false |& tee "${model}-cpu-log.txt"
  echo "Exit code: " "${PIPESTATUS[0]}" >> "${model}-cpu-log.txt"
done < <(find /root/models -name '*.tflite' -print0)