Skip to content

Commit 536dd18

Browse files
cccclai authored and facebook-github-bot committed
Add qnn 16a16w quantization test (#7039)
Summary: Pull Request resolved: #7039 Differential Revision: D66390212
1 parent b3f2a79 commit 536dd18

File tree

3 files changed

+817
-807
lines changed

3 files changed

+817
-807
lines changed

.ci/scripts/test_llama.sh

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -27,6 +27,10 @@ while [[ $# -gt 0 ]]; do
2727
MODE="$2" # portable or xnnpack+custom or xnnpack+custom+qe
2828
shift 2
2929
;;
30+
-pt2e_quantize)
31+
PT2E_QUANTIZE="$2" # portable or xnnpack+custom or xnnpack+custom+qe
32+
shift 2
33+
;;
3034
-upload)
3135
UPLOAD_DIR="$2"
3236
shift 2
@@ -234,6 +238,10 @@ if [[ "${COREML}" == "ON" ]]; then
234238
fi
235239
if [[ "${QNN}" == "ON" ]]; then
236240
EXPORT_ARGS="${EXPORT_ARGS} -kv -v --qnn --disable_dynamic_shape"
241+
echo "PT2E_QUANTIZE is ${PT2E_QUANTIZE}"
242+
if [[ "${PT2E_QUANTIZE}" == "qnn_16a16w" ]]; then
243+
EXPORT_ARGS+=" --tokenizer_path tokenizer.model --pt2e_quantize qnn_16a16w --calibration_tasks wikitext --calibration_limit 1 --calibration_seq_length 128 --calibration_data Once "
244+
fi
237245
fi
238246
# Add dynamically linked library location
239247
$PYTHON_EXECUTABLE -m examples.models.llama.export_llama ${EXPORT_ARGS}

0 commit comments

Comments (0)