Skip to content

Commit d060154

Browse files
Add onnx demos
1 parent 79d143d commit d060154

File tree

9 files changed: +333 additions, −0 deletions
Lines changed: 37 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,37 @@
1+
# SPDX-FileCopyrightText: (c) 2026 Tenstorrent AI ULC
2+
#
3+
# SPDX-License-Identifier: Apache-2.0
4+
import tempfile
5+
import forge
6+
from third_party.tt_forge_models.alexnet.image_classification.onnx import ModelLoader, ModelVariant
7+
8+
9+
def run_alexnet_demo_case(variant):
    """Compile and run one AlexNet ONNX variant on a Tenstorrent device.

    Args:
        variant: ModelVariant selecting which AlexNet checkpoint to load.
    """
    model_loader = ModelLoader(variant=variant)
    # Export the ONNX graph into a scratch directory that is removed on exit.
    with tempfile.TemporaryDirectory() as export_dir:
        model_proto = model_loader.load_model(onnx_tmp_path=export_dir)
        sample_input = model_loader.load_inputs().contiguous()
        forge_module = forge.OnnxModule(variant.value, model_proto)

        # Compile with Forge, then execute inference on the device.
        compiled = forge.compile(forge_module, [sample_input])
        predictions = compiled(sample_input)

        # Report the classification results and a visual separator.
        model_loader.print_cls_results(predictions)
        print("=" * 60, flush=True)
27+
28+
29+
if __name__ == "__main__":
    # Variants exercised by this demo, run one after another.
    for variant in (ModelVariant.ALEXNET, ModelVariant.ALEXNET_OSMR_B):
        run_alexnet_demo_case(variant)
Lines changed: 39 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,39 @@
1+
# SPDX-FileCopyrightText: (c) 2026 Tenstorrent AI ULC
2+
#
3+
# SPDX-License-Identifier: Apache-2.0
4+
import tempfile
5+
import forge
6+
from third_party.tt_forge_models.densenet.image_classification.onnx import ModelLoader, ModelVariant
7+
8+
9+
def run_densenet_demo_case(variant):
    """Compile and run one DenseNet ONNX variant on a Tenstorrent device.

    Args:
        variant: ModelVariant selecting which DenseNet checkpoint to load.
    """
    model_loader = ModelLoader(variant=variant)
    # Export the ONNX graph into a scratch directory that is removed on exit.
    with tempfile.TemporaryDirectory() as export_dir:
        model_proto = model_loader.load_model(onnx_tmp_path=export_dir)
        sample_input = model_loader.load_inputs().contiguous()
        # NOTE(review): sibling demos name the module from variant.value;
        # this one uses variant.name.lower() — confirm whether they differ.
        forge_module = forge.OnnxModule(variant.name.lower(), model_proto)

        # Compile with Forge, then execute inference on the device.
        compiled = forge.compile(forge_module, [sample_input])
        predictions = compiled(sample_input)

        # Report the classification results and a visual separator.
        model_loader.print_cls_results(predictions)
        print("=" * 60, flush=True)
27+
28+
29+
if __name__ == "__main__":
    # Variants exercised by this demo, run one after another.
    for variant in (
        ModelVariant.DENSENET121,
        ModelVariant.DENSENET161,
        ModelVariant.DENSENET169,
        ModelVariant.DENSENET201,
    ):
        run_densenet_demo_case(variant)
Lines changed: 45 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,45 @@
1+
# SPDX-FileCopyrightText: (c) 2026 Tenstorrent AI ULC
2+
#
3+
# SPDX-License-Identifier: Apache-2.0
4+
import tempfile
5+
import forge
6+
from third_party.tt_forge_models.efficientnet.image_classification.onnx import ModelLoader, ModelVariant
7+
8+
9+
def run_efficientnet_demo_case(variant):
    """Compile and run one EfficientNet ONNX variant on a Tenstorrent device.

    Args:
        variant: ModelVariant selecting which EfficientNet checkpoint to load.
    """
    model_loader = ModelLoader(variant=variant)
    # Export the ONNX graph into a scratch directory that is removed on exit.
    with tempfile.TemporaryDirectory() as export_dir:
        model_proto = model_loader.load_model(onnx_tmp_path=export_dir)
        sample_input = model_loader.load_inputs().contiguous()
        forge_module = forge.OnnxModule(variant.value, model_proto)

        # Compile with Forge, then execute inference on the device.
        compiled = forge.compile(forge_module, [sample_input])
        predictions = compiled(sample_input)

        # Report the classification results and a visual separator.
        model_loader.print_cls_results(predictions)
        print("=" * 60, flush=True)
27+
28+
29+
if __name__ == "__main__":
    # Variants exercised by this demo, run one after another.
    for variant in (
        ModelVariant.B0,
        ModelVariant.B1,
        ModelVariant.TIMM_EFFICIENTNET_B0,
        ModelVariant.TIMM_EFFICIENTNET_B4,
        ModelVariant.HF_TIMM_EFFICIENTNET_B0_RA_IN1K,
        ModelVariant.HF_TIMM_EFFICIENTNET_B4_RA2_IN1K,
        ModelVariant.HF_TIMM_EFFICIENTNET_B5_IN12K_FT_IN1K,
        ModelVariant.HF_TIMM_TF_EFFICIENTNET_B0_AA_IN1K,
        ModelVariant.HF_TIMM_EFFICIENTNETV2_RW_S_RA2_IN1K,
        ModelVariant.HF_TIMM_TF_EFFICIENTNETV2_S_IN21K,
    ):
        run_efficientnet_demo_case(variant)
Lines changed: 36 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,36 @@
1+
# SPDX-FileCopyrightText: (c) 2026 Tenstorrent AI ULC
2+
#
3+
# SPDX-License-Identifier: Apache-2.0
4+
import tempfile
5+
import forge
6+
from third_party.tt_forge_models.googlenet.image_classification.onnx import ModelLoader, ModelVariant
7+
8+
9+
def run_googlenet_demo_case(variant):
    """Compile and run one GoogLeNet ONNX variant on a Tenstorrent device.

    Args:
        variant: ModelVariant selecting which GoogLeNet checkpoint to load.
    """
    model_loader = ModelLoader(variant=variant)
    # Export the ONNX graph into a scratch directory that is removed on exit.
    with tempfile.TemporaryDirectory() as export_dir:
        model_proto = model_loader.load_model(onnx_tmp_path=export_dir)
        sample_input = model_loader.load_inputs().contiguous()
        forge_module = forge.OnnxModule("googlenet", model_proto)

        # Compile with Forge, then execute inference on the device.
        compiled = forge.compile(forge_module, [sample_input])
        predictions = compiled(sample_input)

        # Report the classification results and a visual separator.
        model_loader.print_cls_results(predictions)
        print("=" * 60, flush=True)
27+
28+
29+
if __name__ == "__main__":
    # Single variant exercised by this demo.
    for variant in (ModelVariant.GOOGLENET,):
        run_googlenet_demo_case(variant)
Lines changed: 38 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,38 @@
1+
# SPDX-FileCopyrightText: (c) 2026 Tenstorrent AI ULC
2+
#
3+
# SPDX-License-Identifier: Apache-2.0
4+
import tempfile
5+
import forge
6+
from third_party.tt_forge_models.mobilenetv1.image_classification.onnx import ModelLoader, ModelVariant
7+
8+
9+
def run_mobilenetv1_demo_case(variant):
    """Compile and run one MobileNetV1 ONNX variant on a Tenstorrent device.

    Args:
        variant: ModelVariant selecting which MobileNetV1 checkpoint to load.
    """
    model_loader = ModelLoader(variant=variant)
    # Export the ONNX graph into a scratch directory that is removed on exit.
    with tempfile.TemporaryDirectory() as export_dir:
        model_proto = model_loader.load_model(onnx_tmp_path=export_dir)
        sample_input = model_loader.load_inputs().contiguous()
        forge_module = forge.OnnxModule(variant.value, model_proto)

        # Compile with Forge, then execute inference on the device.
        compiled = forge.compile(forge_module, [sample_input])
        predictions = compiled(sample_input)

        # Report the classification results and a visual separator.
        model_loader.print_cls_results(predictions)
        print("=" * 60, flush=True)
27+
28+
29+
if __name__ == "__main__":
    # Variants exercised by this demo, run one after another.
    for variant in (
        ModelVariant.MOBILENET_V1_GITHUB,
        ModelVariant.MOBILENET_V1_075_192_HF,
        ModelVariant.MOBILENET_V1_100_224_HF,
    ):
        run_mobilenetv1_demo_case(variant)
Lines changed: 44 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,44 @@
1+
# SPDX-FileCopyrightText: (c) 2026 Tenstorrent AI ULC
2+
#
3+
# SPDX-License-Identifier: Apache-2.0
4+
import tempfile
5+
import forge
6+
from third_party.tt_forge_models.resnet.image_classification.onnx import ModelLoader, ModelVariant
7+
8+
9+
def run_resnet_onnx(variant):
    """Compile and run one ResNet ONNX variant on a Tenstorrent device.

    Args:
        variant: ModelVariant selecting which ResNet checkpoint to load.
    """
    model_loader = ModelLoader(variant=variant)
    # Export the ONNX graph into a scratch directory that is removed on exit.
    with tempfile.TemporaryDirectory() as export_dir:
        model_proto = model_loader.load_model(onnx_tmp_path=export_dir)
        sample_input = model_loader.load_inputs().contiguous()
        forge_module = forge.OnnxModule(variant.value, model_proto)

        # Compile with Forge, then execute inference on the device.
        compiled = forge.compile(forge_module, [sample_input])
        predictions = compiled(sample_input)

        # Report the classification results and a visual separator.
        model_loader.print_cls_results(predictions)
        print("=" * 60, flush=True)
27+
28+
29+
if __name__ == "__main__":
    # Variants exercised by this demo, run one after another.
    for variant in (
        ModelVariant.RESNET_50_HF,
        ModelVariant.RESNET_50_HF_HIGH_RES,
        ModelVariant.RESNET_50_TIMM,
        ModelVariant.RESNET_50_TIMM_HIGH_RES,
        ModelVariant.RESNET_18,
        ModelVariant.RESNET_34,
        ModelVariant.RESNET_50,
        ModelVariant.RESNET_101,
        ModelVariant.RESNET_152,
    ):
        run_resnet_onnx(variant)
Lines changed: 26 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,26 @@
1+
# SPDX-FileCopyrightText: (c) 2026 Tenstorrent AI ULC
2+
#
3+
# SPDX-License-Identifier: Apache-2.0
4+
import tempfile
5+
import forge
6+
from third_party.tt_forge_models.nbeats.time_series_forecasting.onnx import ModelLoader
7+
8+
9+
def run_nbeats_demo_case():
    """Compile and run the N-BEATS time-series ONNX model on a Tenstorrent device.

    Unlike the classification demos, the loader here returns a sequence of
    inputs, and no result decoding/printing is performed.
    """
    model_loader = ModelLoader()
    # Export the ONNX graph into a scratch directory that is removed on exit.
    with tempfile.TemporaryDirectory() as export_dir:
        model_proto = model_loader.load_model(onnx_tmp_path=export_dir)
        sample_inputs = model_loader.load_inputs()
        forge_module = forge.OnnxModule("nbeats", model_proto)

        # Compile with Forge, then execute inference on the device.
        compiled = forge.compile(forge_module, sample_inputs)
        compiled(*sample_inputs)
23+
24+
25+
if __name__ == "__main__":
    # Single demo case; no variants for N-BEATS.
    run_nbeats_demo_case()
Lines changed: 34 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,34 @@
1+
# SPDX-FileCopyrightText: (c) 2026 Tenstorrent AI ULC
2+
#
3+
# SPDX-License-Identifier: Apache-2.0
4+
import tempfile
5+
import forge
6+
from third_party.tt_forge_models.roberta.sequence_classification.onnx import ModelLoader, ModelVariant
7+
8+
9+
def run_roberta_demo_case(variant):
    """Compile and run one RoBERTa sequence-classification ONNX variant.

    Args:
        variant: ModelVariant selecting which RoBERTa checkpoint to load.
    """
    model_loader = ModelLoader(variant=variant)
    # Export the ONNX graph into a scratch directory that is removed on exit.
    with tempfile.TemporaryDirectory() as export_dir:
        model_proto = model_loader.load_model(onnx_tmp_path=export_dir)
        sample_input = model_loader.load_inputs()
        forge_module = forge.OnnxModule(variant.value, model_proto)

        # Compile with Forge, then execute inference on the device.
        compiled = forge.compile(forge_module, [sample_input])
        predictions = compiled(sample_input)

        # Decode model output into a readable result, then print a separator.
        model_loader.decode_output(predictions)
        print("=" * 60, flush=True)
27+
28+
29+
if __name__ == "__main__":
    # Single variant exercised by this demo.
    for variant in (ModelVariant.ROBERTA_BASE_SENTIMENT,):
        run_roberta_demo_case(variant)
Lines changed: 34 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,34 @@
1+
# SPDX-FileCopyrightText: (c) 2026 Tenstorrent AI ULC
2+
#
3+
# SPDX-License-Identifier: Apache-2.0
4+
import tempfile
5+
import forge
6+
from third_party.tt_forge_models.squeezebert.sequence_classification.onnx import ModelLoader, ModelVariant
7+
8+
9+
def run_squeezebert_demo_case(variant):
    """Compile and run one SqueezeBERT sequence-classification ONNX variant.

    Args:
        variant: ModelVariant selecting which SqueezeBERT checkpoint to load.
    """
    model_loader = ModelLoader(variant=variant)
    # Export the ONNX graph into a scratch directory that is removed on exit.
    with tempfile.TemporaryDirectory() as export_dir:
        model_proto = model_loader.load_model(onnx_tmp_path=export_dir)
        sample_input = model_loader.load_inputs()
        forge_module = forge.OnnxModule(variant.value, model_proto)

        # Compile with Forge, then execute inference on the device.
        compiled = forge.compile(forge_module, [sample_input])
        predictions = compiled(sample_input)

        # Decode model output into a readable result, then print a separator.
        model_loader.decode_output(predictions)
        print("=" * 60, flush=True)
27+
28+
29+
if __name__ == "__main__":
    # Single variant exercised by this demo.
    for variant in (ModelVariant.MNLI,):
        run_squeezebert_demo_case(variant)

0 commit comments

Comments (0)