1 change: 1 addition & 0 deletions backends/mediatek/partitioner.py
@@ -81,6 +81,7 @@ def ops_to_not_decompose(
torch.ops.aten.upsample_bilinear2d.vec,
torch.ops.aten.upsample_nearest2d.default,
torch.ops.aten.upsample_nearest2d.vec,
torch.ops.aten._safe_softmax.default,
]
return (ops_not_decompose, None)

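Note on the change above: keeping aten._safe_softmax (the export-time form of softmax) out of decomposition lets the Neuropilot backend match the op directly instead of its decomposed pieces. A minimal sketch of how this surfaces, assuming the MediaTek backend is installed, the import paths below are correct, and ops_to_not_decompose accepts an exported program as in the ExecuTorch Partitioner interface:

import torch
from executorch.backends.mediatek import NeuropilotPartitioner
from executorch.exir.backend.compile_spec_schema import CompileSpec


class SoftmaxModule(torch.nn.Module):
    def forward(self, x):
        return torch.nn.functional.softmax(x, dim=-1)


# Export a toy module, then ask the partitioner which ops it wants kept intact.
ep = torch.export.export(SoftmaxModule().eval(), (torch.randn(1, 8),))
partitioner = NeuropilotPartitioner([CompileSpec("platform-config", b"mt6989")])
ops_kept, _ = partitioner.ops_to_not_decompose(ep)
assert torch.ops.aten._safe_softmax.default in ops_kept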
1 change: 1 addition & 0 deletions backends/mediatek/scripts/mtk_build.sh
@@ -33,6 +33,7 @@ rm -rf cmake-android-out && mkdir cmake-android-out && cd cmake-android-out
cmake -DBUCK2="$BUCK_PATH" \
-DCMAKE_TOOLCHAIN_FILE="$ANDROID_NDK/build/cmake/android.toolchain.cmake" \
-DANDROID_ABI=arm64-v8a \
-DANDROID_PLATFORM=android-26 \
-DEXECUTORCH_BUILD_NEURON=ON \
-DNEURON_BUFFER_ALLOCATOR_LIB="$NEURON_BUFFER_ALLOCATOR_LIB" \
..
12 changes: 6 additions & 6 deletions examples/mediatek/aot_utils/oss_utils/utils.py
@@ -5,7 +5,7 @@
# LICENSE file in the root directory of this source tree.

import os
from typing import Optional
from typing import Dict, Optional

import torch
from executorch import exir
@@ -24,6 +24,8 @@ def build_executorch_binary(
file_name,
dataset,
quant_dtype: Optional[Precision] = None,
skip_op_name: Optional[set] = None,
skip_op_type: Optional[set] = None,
):
if quant_dtype is not None:
quantizer = NeuropilotQuantizer()
@@ -47,14 +49,12 @@
from executorch.exir.program._program import to_edge_transform_and_lower

edge_compile_config = exir.EdgeCompileConfig(_check_ir_validity=False)
# skipped op names are used for deeplabV3 model
neuro_partitioner = NeuropilotPartitioner(
[CompileSpec("platform-config", b"mt6989")],
op_names_to_skip={
"aten_convolution_default_106",
"aten_convolution_default_107",
},
op_types_to_skip=skip_op_type,
op_names_to_skip=skip_op_name,
)

edge_prog = to_edge_transform_and_lower(
aten_dialect,
compile_config=edge_compile_config,
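With the hard-coded DeepLabV3 node names removed, callers now opt into skipping through the new skip_op_name / skip_op_type arguments. A usage sketch in the same style as the example scripts (the model, output path, and skipped node name here are illustrative only; deeplab_v3.py below shows the real values added in this PR):

import torch
from aot_utils.oss_utils.utils import build_executorch_binary
from executorch.backends.mediatek import Precision


class TinyConv(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.conv = torch.nn.Conv2d(3, 8, 3, padding=1)

    def forward(self, x):
        return self.conv(x)


sample = torch.randn(1, 3, 32, 32)
build_executorch_binary(
    TinyConv().eval(),
    (sample,),
    "./artifacts/tiny_conv_mtk",
    [(sample,)],                                  # calibration data for A8W8 quantization
    quant_dtype=Precision.A8W8,
    skip_op_name={"aten_convolution_default_0"},  # hypothetical node name from the edge graph
)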
98 changes: 98 additions & 0 deletions examples/mediatek/model_export_scripts/dcgan.py
@@ -0,0 +1,98 @@
# Copyright (c) MediaTek Inc.
# All rights reserved
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import os
import sys

if os.getcwd() not in sys.path:
sys.path.append(os.getcwd())
import argparse
import json
import os

import dcgan_main
import numpy as np

import torch
from aot_utils.oss_utils.utils import build_executorch_binary, make_output_dir
from executorch.backends.mediatek import Precision


class NhwcWrappedModel(torch.nn.Module):
def __init__(self, is_gen=True):
super(NhwcWrappedModel, self).__init__()
if is_gen:
self.dcgan = dcgan_main.Generator()
else:
self.dcgan = dcgan_main.Discriminator()

def forward(self, input1):
nchw_input1 = input1.permute(0, 3, 1, 2)
output = self.dcgan(nchw_input1)
return output


if __name__ == "__main__":
parser = argparse.ArgumentParser()

parser.add_argument(
"-a",
"--artifact",
help="path for storing generated artifacts by this example. " "Default ./dcgan",
default="./dcgan",
type=str,
)

args = parser.parse_args()

# ensure the working directory exists.
os.makedirs(args.artifact, exist_ok=True)

# prepare dummy data
inputG = torch.randn(1, 1, 1, 100)
inputD = torch.randn(1, 64, 64, 3)

# build Generator
netG_instance = NhwcWrappedModel(True)
netG_pte_filename = "dcgan_netG_mtk"
build_executorch_binary(
netG_instance.eval(),
(torch.randn(1, 1, 1, 100),),
f"{args.artifact}/{netG_pte_filename}",
[(inputG,)],
quant_dtype=Precision.A8W8,
)

# build Discriminator
netD_instance = NhwcWrappedModel(False)
netD_pte_filename = "dcgan_netD_mtk"
build_executorch_binary(
netD_instance.eval(),
(torch.randn(1, 64, 64, 3),),
f"{args.artifact}/{netD_pte_filename}",
[(inputD,)],
quant_dtype=Precision.A8W8,
)

# save data to inference on device
input_list_file = f"{args.artifact}/input_list_G.txt"
with open(input_list_file, "w") as f:
f.write("inputG_0_0.bin")
f.flush()
file_name = f"{args.artifact}/inputG_0_0.bin"
inputG.detach().numpy().tofile(file_name)
file_name = f"{args.artifact}/goldenG_0_0.bin"
goldenG = netG_instance(inputG)
goldenG.detach().numpy().tofile(file_name)

input_list_file = f"{args.artifact}/input_list_D.txt"
with open(input_list_file, "w") as f:
f.write("inputD_0_0.bin")
f.flush()
file_name = f"{args.artifact}/inputD_0_0.bin"
inputD.detach().numpy().tofile(file_name)
file_name = f"{args.artifact}/goldenD_0_0.bin"
goldenD = netD_instance(inputD)
goldenD.detach().numpy().tofile(file_name)
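The .bin files written above are raw float32 dumps, so they can be read back to confirm shapes before pushing to device. A quick sketch, assuming the default ./dcgan artifact directory and the tensor shapes used in this script:

import numpy as np

# NHWC generator input (1, 1, 1, 100), NCHW generator output (1, 3, 64, 64),
# and the scalar discriminator score, matching the tofile() calls above.
inputG = np.fromfile("./dcgan/inputG_0_0.bin", dtype=np.float32).reshape(1, 1, 1, 100)
goldenG = np.fromfile("./dcgan/goldenG_0_0.bin", dtype=np.float32).reshape(1, 3, 64, 64)
goldenD = np.fromfile("./dcgan/goldenD_0_0.bin", dtype=np.float32)  # shape (1,)
print(inputG.shape, goldenG.shape, goldenD.shape)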
72 changes: 72 additions & 0 deletions examples/mediatek/model_export_scripts/dcgan_main.py
@@ -0,0 +1,72 @@
"""Ref https://github.com/pytorch/examples/blob/main/dcgan/main.py"""

import torch.nn as nn


class Generator(nn.Module):
def __init__(self):
super().__init__()
self.main = nn.Sequential(
# input is Z, going into a convolution
nn.ConvTranspose2d(100, 64 * 8, 4, 1, 0, bias=False),
nn.BatchNorm2d(64 * 8),
nn.ReLU(True),
# state size. (64*8) x 4 x 4
nn.ConvTranspose2d(64 * 8, 64 * 4, 4, 2, 1, bias=False),
nn.BatchNorm2d(64 * 4),
nn.ReLU(True),
# state size. (64*4) x 8 x 8
nn.ConvTranspose2d(64 * 4, 64 * 2, 4, 2, 1, bias=False),
nn.BatchNorm2d(64 * 2),
nn.ReLU(True),
# state size. (64*2) x 16 x 16
nn.ConvTranspose2d(64 * 2, 64, 4, 2, 1, bias=False),
nn.BatchNorm2d(64),
nn.ReLU(True),
# state size. (64) x 32 x 32
nn.ConvTranspose2d(64, 3, 4, 2, 1, bias=False),
nn.Tanh(),
# state size. (3) x 64 x 64
)

def forward(self, input):
output = self.main(input)
return output


# main_netG_input_shape = [1, 100, 1, 1]
# model = Generator()


class Discriminator(nn.Module):
def __init__(self):
super().__init__()
self.main = nn.Sequential(
# input is (3) x 64 x 64
nn.Conv2d(3, 64, 4, 2, 1, bias=False),
nn.LeakyReLU(0.2, inplace=True),
# state size. (64) x 32 x 32
nn.Conv2d(64, 64 * 2, 4, 2, 1, bias=False),
nn.BatchNorm2d(64 * 2),
nn.LeakyReLU(0.2, inplace=True),
# state size. (64*2) x 16 x 16
nn.Conv2d(64 * 2, 64 * 4, 4, 2, 1, bias=False),
nn.BatchNorm2d(64 * 4),
nn.LeakyReLU(0.2, inplace=True),
# state size. (64*4) x 8 x 8
nn.Conv2d(64 * 4, 64 * 8, 4, 2, 1, bias=False),
nn.BatchNorm2d(64 * 8),
nn.LeakyReLU(0.2, inplace=True),
# state size. (64*8) x 4 x 4
nn.Conv2d(64 * 8, 1, 4, 1, 0, bias=False),
nn.Sigmoid(),
)

def forward(self, input):
output = self.main(input)

return output.view(-1, 1).squeeze(1)


# main_netD_input_shape = [1, 3, 64, 64]
# model = Discriminator()
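A quick shape check of the two modules above (a standalone sketch; it assumes this file is importable as dcgan_main from the working directory, as the export script does):

import torch
from dcgan_main import Discriminator, Generator

# Generator maps a (1, 100, 1, 1) latent to a (1, 3, 64, 64) image;
# Discriminator maps that image to a single score of shape (1,).
z = torch.randn(1, 100, 1, 1)
img = Generator().eval()(z)
score = Discriminator().eval()(img)
print(img.shape, score.shape)  # torch.Size([1, 3, 64, 64]) torch.Size([1])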
15 changes: 10 additions & 5 deletions examples/mediatek/model_export_scripts/deeplab_v3.py
@@ -3,18 +3,19 @@
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import os
import sys

if os.getcwd() not in sys.path:
sys.path.append(os.getcwd())
import argparse
import os
import random

import numpy as np

import torch
from aot_utils.oss_utils.utils import build_executorch_binary
from executorch.backends.mediatek import Precision
from executorch.examples.mediatek.aot_utils.oss_utils.utils import (
build_executorch_binary,
)
from executorch.examples.models.deeplab_v3 import DeepLabV3ResNet101Model


Expand All @@ -26,7 +27,7 @@ def __init__(self):
def forward(self, input1):
nchw_input1 = input1.permute(0, 3, 1, 2)
nchw_output = self.deeplabv3(nchw_input1)
return nchw_output.permute(0, 2, 3, 1)
return nchw_output


def get_dataset(data_size, dataset_dir, download):
@@ -121,4 +122,8 @@ def get_dataset(data_size, dataset_dir, download):
f"{args.artifact}/{pte_filename}",
inputs,
quant_dtype=Precision.A8W8,
skip_op_name={
"aten_convolution_default_106",
"aten_convolution_default_107",
},
)
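The two convolution node names passed above come from the edge-dialect graph that build_executorch_binary produces internally. One hedged way to enumerate candidate names yourself is sketched below; exact indices can differ once quantization and the script's compile config are applied, and the import paths are assumed:

import torch
from executorch.exir import EdgeCompileConfig, to_edge
from executorch.examples.models.deeplab_v3 import DeepLabV3ResNet101Model

# Export the eager model, lower to the edge dialect, then list convolution nodes.
model = DeepLabV3ResNet101Model()
ep = torch.export.export(model.get_eager_model().eval(), model.get_example_inputs())
edge = to_edge(ep, compile_config=EdgeCompileConfig(_check_ir_validity=False))
for node in edge.exported_program().graph.nodes:
    if node.op == "call_function" and "convolution" in node.name:
        print(node.name)  # e.g. aten_convolution_default_106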
7 changes: 4 additions & 3 deletions examples/mediatek/model_export_scripts/edsr.py
@@ -6,14 +6,15 @@

import argparse
import os
import sys

if os.getcwd() not in sys.path:
sys.path.append(os.getcwd())
import numpy as np

import torch
from aot_utils.oss_utils.utils import build_executorch_binary
from executorch.backends.mediatek import Precision
from executorch.examples.mediatek.aot_utils.oss_utils.utils import (
build_executorch_binary,
)
from executorch.examples.models.edsr import EdsrModel

from PIL import Image