25 changes: 2 additions & 23 deletions .ci/scripts/test_model.sh
@@ -188,22 +188,6 @@ test_model_with_qnn() {
EXPORT_SCRIPT=edsr
# Additional deps for edsr
pip install piq
elif [[ "${MODEL_NAME}" == "cvt" ]]; then
EXPORT_SCRIPT=cvt
elif [[ "${MODEL_NAME}" == "dit" ]]; then
EXPORT_SCRIPT=dit
elif [[ "${MODEL_NAME}" == "efficientnet" ]]; then
EXPORT_SCRIPT=efficientnet
elif [[ "${MODEL_NAME}" == "focalnet" ]]; then
EXPORT_SCRIPT=focalnet
elif [[ "${MODEL_NAME}" == "mobilevit_v1" ]]; then
EXPORT_SCRIPT=mobilevit_v1
elif [[ "${MODEL_NAME}" == "mobilevit_v2" ]]; then
EXPORT_SCRIPT=mobilevit_v2
elif [[ "${MODEL_NAME}" == "pvt" ]]; then
EXPORT_SCRIPT=pvt
elif [[ "${MODEL_NAME}" == "swin" ]]; then
EXPORT_SCRIPT=swin_transformer
elif [[ "${MODEL_NAME}" == "albert" ]]; then
EXPORT_SCRIPT=albert
elif [[ "${MODEL_NAME}" == "bert" ]]; then
@@ -212,8 +196,6 @@ test_model_with_qnn() {
EXPORT_SCRIPT=distilbert
elif [[ "${MODEL_NAME}" == "eurobert" ]]; then
EXPORT_SCRIPT=eurobert
elif [[ "${MODEL_NAME}" == "roberta" ]]; then
EXPORT_SCRIPT=roberta
else
echo "Unsupported model $MODEL_NAME"
exit 1
@@ -228,13 +210,10 @@ test_model_with_qnn() {
"dl3"|"mv3"|"mv2"|"ic4"|"ic3"|"vit"|"mb"|"w2l")
SCRIPT_FOLDER=scripts
;;
"cvt"|"dit"|"focalnet"|"mobilevit_v2"|"pvt"|"swin")
SCRIPT_FOLDER=oss_scripts
;;
"albert"|"bert"|"distilbert"|"roberta"|"efficientnet"|"mobilevit_v1")
"albert"|"bert"|"distilbert")
pip install evaluate
SCRIPT_FOLDER=oss_scripts
# 16bit models will encounter op validation fail on some operations,
# Bert models running in 16bit will encounter op validation failures on some operations,
# which requires CHIPSET >= SM8550.
QNN_CHIPSET=SM8550
;;
6 changes: 3 additions & 3 deletions .github/workflows/trunk.yml
@@ -470,7 +470,7 @@ jobs:
docker-image: executorch-ubuntu-22.04-qnn-sdk
submodules: 'recursive'
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
timeout: 90
timeout: 900
script: |
# The generic Linux job chooses to use base env, not the one setup by the image
CONDA_ENV=$(conda env list --json | jq -r ".envs | .[-1]")
@@ -489,14 +489,14 @@
strategy:
matrix:
dtype: [fp32]
model: [cvt, dit, efficientnet, focalnet, mobilevit_v1, mobilevit_v2, pvt, swin, albert, bert, distilbert, roberta] # eurobert requires transformers >= 4.48.0, skip for now
model: [albert, bert, distilbert] # eurobert requires transformers >= 4.48.0, skip for now
fail-fast: false
with:
runner: linux.2xlarge
docker-image: executorch-ubuntu-22.04-qnn-sdk
submodules: 'recursive'
ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
timeout: 90
timeout: 900
script: |
# The generic Linux job chooses to use base env, not the one setup by the image
CONDA_ENV=$(conda env list --json | jq -r ".envs | .[-1]")
2 changes: 1 addition & 1 deletion backends/qualcomm/builders/op_slice_copy.py
@@ -56,7 +56,7 @@ def define_node(
if start < 0:
start = start % input_tensor.shape[dim]

if len(node.args) > 3 and node.args[3] is not None:
if len(node.args) > 3:
end = min(cast(int, node.args[3]), input_tensor.shape[dim])
if end < 0:
end = end % input_tensor.shape[dim]
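The slice_copy hunk above only changes the guard on the optional end argument; the normalization of negative indices around it is unchanged. The standalone sketch below restates that normalization outside the builder, assuming plain integers instead of the backend's node and tensor objects, so the helper name and signature are illustrative only.

from typing import Optional

def normalize_slice_bounds(dim_size: int, start: int, end: Optional[int]) -> tuple[int, int]:
    # Negative starts wrap around the dimension, mirroring `start % input_tensor.shape[dim]`.
    if start < 0:
        start = start % dim_size
    # A missing end means "slice to the end of the dimension"; an explicit end is
    # clamped to the dimension size and wrapped if negative, mirroring
    # `min(node.args[3], input_tensor.shape[dim])` followed by the modulo step.
    if end is None:
        end = dim_size
    else:
        end = min(end, dim_size)
        if end < 0:
            end = end % dim_size
    return start, end

# e.g. a dimension of size 8 sliced with start=-3 and no end yields bounds (5, 8)
print(normalize_slice_bounds(8, -3, None))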
32 changes: 16 additions & 16 deletions backends/qualcomm/tests/test_qnn_delegate.py
@@ -4585,13 +4585,14 @@ def test_gMLP(self):
self.assertGreaterEqual(msg["top_1"], 60)
self.assertGreaterEqual(msg["top_5"], 85)

def test_mobilevit_v1(self):
@unittest.skip("Only outputs good accuracy in QNN 2.29")
def test_mobilevit_v2(self):
if not self.required_envs([self.image_dataset]):
self.skipTest("missing required envs")

cmds = [
"python",
f"{self.executorch_root}/examples/qualcomm/oss_scripts/mobilevit_v1.py"
f"{self.executorch_root}/examples/qualcomm/oss_scripts/mobilevit_v2.py",
"--dataset",
self.image_dataset,
"--artifact",
@@ -4609,6 +4610,8 @@ def test_mobilevit_v1(self):
]
if self.host:
cmds.extend(["--host", self.host])
if self.shared_buffer:
cmds.extend(["--shared_buffer"])

p = subprocess.Popen(cmds, stdout=subprocess.DEVNULL)
with Listener((self.ip, self.port)) as listener:
@@ -4618,22 +4621,17 @@ def test_mobilevit_v1(self):
if "Error" in msg:
self.fail(msg["Error"])
else:
self.assertGreaterEqual(msg["top_1"], 70)
self.assertGreaterEqual(msg["top_1"], 50)
self.assertGreaterEqual(msg["top_5"], 85)

@unittest.skip("Only outputs good accuracy in QNN 2.29")
def test_mobilevit_v2(self):
def test_pvt(self):
if not self.required_envs([self.image_dataset]):
self.skipTest("missing required envs")

cmds = [
"python",
f"{self.executorch_root}/examples/qualcomm/oss_scripts/mobilevit_v2.py",
"--dataset",
f"{self.executorch_root}/examples/qualcomm/oss_scripts/pvt.py",
self.image_dataset,
"--artifact",
self.artifact_dir,
"--build_folder",
self.build_folder,
"--device",
self.device,
@@ -4646,8 +4644,6 @@ def test_mobilevit_v2(self):
]
if self.host:
cmds.extend(["--host", self.host])
if self.shared_buffer:
cmds.extend(["--shared_buffer"])

p = subprocess.Popen(cmds, stdout=subprocess.DEVNULL)
with Listener((self.ip, self.port)) as listener:
@@ -4657,17 +4653,21 @@ def test_mobilevit_v2(self):
if "Error" in msg:
self.fail(msg["Error"])
else:
self.assertGreaterEqual(msg["top_1"], 50)
self.assertGreaterEqual(msg["top_1"], 65)
self.assertGreaterEqual(msg["top_5"], 85)

def test_pvt(self):
def test_mobilevit1(self):
if not self.required_envs([self.image_dataset]):
self.skipTest("missing required envs")

cmds = [
"python",
f"{self.executorch_root}/examples/qualcomm/oss_scripts/pvt.py",
f"{self.executorch_root}/examples/qualcomm/oss_scripts/mobilevit1.py"
"--dataset",
self.image_dataset,
"--artifact",
self.artifact_dir,
"--build_folder",
self.build_folder,
"--device",
self.device,
@@ -4689,7 +4689,7 @@ def test_pvt(self):
if "Error" in msg:
self.fail(msg["Error"])
else:
self.assertGreaterEqual(msg["top_1"], 65)
self.assertGreaterEqual(msg["top_1"], 70)
self.assertGreaterEqual(msg["top_5"], 85)

def test_regnet(self):
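The accuracy tests in this file all share the same harness shape: launch the example script as a subprocess, wait for it to report metrics back over a local multiprocessing connection, then assert on the reported top-1/top-5 scores. The sketch below shows that round trip in isolation; the address, command line, and thresholds are placeholders rather than the suite's real configuration.

import json
import subprocess
from multiprocessing.connection import Listener

address = ("127.0.0.1", 50000)  # hypothetical ip/port; the real tests take these from the environment
cmds = [
    "python",
    "examples/qualcomm/oss_scripts/some_model.py",  # placeholder example script
    "--ip", address[0],
    "--port", str(address[1]),
]

proc = subprocess.Popen(cmds, stdout=subprocess.DEVNULL)
with Listener(address) as listener:
    conn = listener.accept()       # the example script connects back with multiprocessing's Client
    proc.communicate()             # block until the on-device run finishes
    msg = json.loads(conn.recv())  # e.g. {"top_1": 71.3, "top_5": 90.1} or {"Error": "..."}

if "Error" in msg:
    raise RuntimeError(msg["Error"])
assert msg["top_1"] >= 70 and msg["top_5"] >= 85  # thresholds vary per model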
29 changes: 10 additions & 19 deletions examples/qualcomm/oss_scripts/deit.py
@@ -6,12 +6,10 @@

import getpass
import json
import logging
import os
from multiprocessing.connection import Client

import numpy as np
import torch
from executorch.backends.qualcomm._passes.qnn_pass_manager import (
get_capture_program_passes,
)
@@ -48,23 +46,16 @@ def main(args):
data_num = 100
height = config.image_size
width = config.image_size

if args.ci:
inputs = [(torch.rand(1, 3, height, width),)]
logging.warning(
"This option is for CI to verify the export flow. It uses random input and will result in poor accuracy."
)
else:
inputs, targets, input_list = get_imagenet_dataset(
dataset_path=f"{args.dataset}",
data_size=data_num,
image_shape=(height, width),
crop_size=(height, width),
)
inputs, targets, input_list = get_imagenet_dataset(
dataset_path=f"{args.dataset}",
data_size=data_num,
image_shape=(height, width),
crop_size=(height, width),
)

# Get the Deit model.
model = get_instance()
pte_filename = "deit_qnn_q8"
pte_filename = "deit_qnn"

# lower to QNN
passes_job = get_capture_program_passes()
@@ -129,8 +120,8 @@ def main(args):
parser.add_argument(
"-a",
"--artifact",
help="path for storing generated artifacts and output by this example. Default ./deit",
default="./deit",
help="path for storing generated artifacts and output by this example. Default ./deit_qnn",
default="./deit_qnn",
type=str,
)

@@ -143,7 +134,7 @@ def main(args):
"for https://www.kaggle.com/datasets/ifigotin/imagenetmini-1000)"
),
type=str,
required=False,
required=True,
)

args = parser.parse_args()
examples/qualcomm/oss_scripts/{mobilevit_v1.py → mobilevit1.py}
@@ -84,7 +84,7 @@ def main(args):
.to("cpu")
)

pte_filename = "mobilevit_v1_qnn_q16"
pte_filename = "mobilevit1_qnn_q16"
build_executorch_binary(
module.eval(),
inputs[0],
@@ -157,8 +157,8 @@ def main(args):
"-a",
"--artifact",
help="path for storing generated artifacts by this example. "
"Default ./mobilevit_v1",
default="./mobilevit_v1",
"Default ./mobilevit1",
default="./mobilevit1",
type=str,
)

28 changes: 7 additions & 21 deletions examples/qualcomm/oss_scripts/roberta.py
@@ -6,7 +6,6 @@

import getpass
import json
import logging
import os
from multiprocessing.connection import Client

Expand Down Expand Up @@ -39,29 +38,16 @@ def main(args):
skip_node_id_set, skip_node_op_set = parse_skip_delegation_node(args)

os.makedirs(args.artifact, exist_ok=True)
data_size = 100

tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-base")
data_size = 100
if args.ci:
random_ids = torch.randint(low=0, high=100, size=(1, 100), dtype=torch.int32)
attention_mask = torch.ones((1, 100), dtype=torch.float32)
inputs = [
(
random_ids,
attention_mask,
)
]
logging.warning(
"This option is for CI to verify the export flow. It uses random input and will result in poor accuracy."
)
else:
inputs, targets, input_list = get_masked_language_model_dataset(
args.dataset, tokenizer, data_size
)
inputs, targets, input_list = get_masked_language_model_dataset(
args.dataset, tokenizer, data_size
)

# Get the Roberta model.
model = get_instance(args)
pte_filename = "roberta_qnn_q16"
pte_filename = "roberta_qnn"

# lower to QNN
passes_job = get_capture_program_passes()
@@ -151,7 +137,7 @@ def main(args):
"-a",
"--artifact",
help="path for storing generated artifacts and output by this example. Default ./Roberta_qnn",
default="./roberta",
default="./Roberta_qnn",
type=str,
)
parser.add_argument(
@@ -163,7 +149,7 @@
"for https://www.kaggle.com/datasets/mikeortman/wikipedia-sentences"
),
type=str,
required=False,
required=True,
)

args = parser.parse_args()
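The roberta example above feeds the lowered model (input_ids, attention_mask) pairs built from the wikipedia-sentences dataset. The rough sketch below shows the shape of one such pair using the same xlm-roberta-base tokenizer; the padding and truncation choices are a guess, since the real preprocessing lives in get_masked_language_model_dataset, and the int32/float32 dtypes simply match the removed --ci path's random input.

import torch
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("xlm-roberta-base")
enc = tokenizer(
    "The quick brown <mask> jumps over the lazy dog.",
    padding="max_length",
    truncation=True,
    max_length=100,
    return_tensors="pt",
)
# One (input_ids, attention_mask) sample shaped like the example's inputs.
sample = (enc["input_ids"].to(torch.int32), enc["attention_mask"].to(torch.float32))
print(sample[0].shape, sample[1].shape)  # torch.Size([1, 100]) torch.Size([1, 100])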
7 changes: 3 additions & 4 deletions examples/qualcomm/oss_scripts/swin_transformer.py
@@ -89,7 +89,7 @@ def main(args):

data_num = 100
if args.ci:
inputs = [(torch.rand(1, 3, 224, 224),)]
inputs = [torch.rand(1, 3, 224, 224)]
logging.warning(
"This option is for CI to verify the export flow. It uses random input and will result in poor accuracy."
)
@@ -181,9 +181,8 @@
parser.add_argument(
"-a",
"--artifact",
help="path for storing generated artifacts by this example. "
"Default ./swin_transformer",
default="./swin_transformer",
help="path for storing generated artifacts by this example. " "Default ./swin",
default="./swin",
type=str,
)
