Commit a1945ba

Unstable to stable (#312)
* add unstable_to_stable backend and check_unstable_api
* temp commit
* temp commit
* fix unstable to stable
1 parent fd025b0 commit a1945ba

File tree: 23 files changed, +4863 −32 lines


graph_net/torch/backend/unstable_to_stable_backend.py

Lines changed: 35 additions & 32 deletions

@@ -1,31 +1,35 @@
 import os
 import torch
+import sys
+import inspect
 from .graph_compiler_backend import GraphCompilerBackend


 class UnstableToStableBackend(GraphCompilerBackend):
-    def __call__(self, model):
+    def __call__(self, model, model_path):
         # Perform unstable API check before running the model
-        self.model = model
-        self.unstable_to_stable()
-        self.check_unstable_api()
+        unstable_api = os.getenv("DISALLOWED_UNSTABLE_API", "").strip()
+        self.unstable_api = unstable_api
+        self.model_path = model_path
+        self.unstable_to_stable(model)
+        self.check_unstable_api(model)
         return self.model

     """
-    TODO: 实现将 self.model 中的不稳定(unstable)API 转换为稳定(stable)API 的逻辑。
-    API 负责遍历 self.model,并将其中调用的实验性或不稳定接口替换为对应的稳定版本。
-    注意:此逻辑属于模型编译安全机制的重要组成部分,请勿随意修改或删除。
-
-    api命名规范:
-    <unstable>_to_<stable>
-
-    stable api链接:
+    TODO: Implement logic to convert unstable APIs in `self.model` into their stable counterparts.
+    This API is responsible for traversing `self.model` and replacing any calls to experimental or unstable interfaces with the corresponding stable versions.
+    Note: This logic is a critical component of the model compilation safety mechanism—do not modify or remove it without caution.
+
+    **API naming convention:**
+    `<unstable>_to_<stable>`
+
+    **Stable API reference link:**
     """

-    def unstable_to_stable(self):
+    def unstable_to_stable(self, model):
         return

-    def check_unstable_api(self):
+    def check_unstable_api(self, model):
         """
         Check whether gm contains the API specified in the environment
         variable DISALLOWED_UNSTABLE_API. If it does, raise an exception and stop
@@ -35,29 +39,28 @@ def check_unstable_api(self):
         This logic is part of the GraphNet compiler safety mechanism.
         Do NOT modify, remove, or bypass this check under any circumstances.
         """
-        unstable_api = os.getenv("DISALLOWED_UNSTABLE_API", "").strip()
-        if not unstable_api:
-            return  # Skip check if no environment variable is set

-        from torch.fx import symbolic_trace
+        # from torch.fx import symbolic_trace

-        try:
-            # Convert the model into a static computation graph (FX IR)
-            traced = symbolic_trace(self.model)
-            graph_text = str(traced.graph)
-        except Exception as e:
-            # In case tracing fails, fallback to textual model dump
-            graph_text = str(self.model)
+        # try:
+        #     # Convert the model into a static computation graph (FX IR)
+        #     traced = symbolic_trace(self.model)
+        #     graph_text = str(traced.graph)
+        # except Exception as e:
+        #     # In case tracing fails, fallback to textual model dump
+        #     graph_text = str(self.model)

+        print(f"model path is: {self.model_path}")
+        model_file_path = self.model_path + "model.py"
+        with open(model_file_path, "r", encoding="utf-8") as f:
+            graph_text = f.read()
         # Search for the unstable API substring
-        if unstable_api in graph_text:
-            count = graph_text.count(unstable_api)
-            raise RuntimeError(
-                f"❌ Detected unstable API '{unstable_api}' '{count}' times in model graph.\n"
-                f"Please replace it with a stable API before proceeding.\n"
-            )
+        if self.unstable_api in graph_text:
+            count = graph_text.count(self.unstable_api)
+            print(f"❌unstable_api:{self.unstable_api} occurs {count} times")
+            sys.exit(-1)
         else:
-            print(f"✅ Model passed: no occurrence of '{unstable_api}' found.")
+            print(f"✅ Model passed: no occurrence of '{self.unstable_api}' found.")

     def synchronize(self):
         # Synchronize CUDA operations if available
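
The `unstable_to_stable` body above is still a stub. Below is a minimal sketch of one possible shape for it, consistent with the `<unstable>_to_<stable>` naming convention in the docstring and with the fact that `check_unstable_api` scans the captured `model.py` as plain text. The mapping table, the helper name `rewrite_unstable_calls`, and the source-level rewrite strategy are assumptions of this sketch, not part of the commit.

import re

# Placeholder table: the real unstable -> stable replacements (for example, a
# stable equivalent of torch._add_batch_dim, the API targeted by
# plot_unstable_to_stable.sh) are not specified in this commit.
UNSTABLE_TO_STABLE_MAP = {
    # "torch._add_batch_dim": "<stable replacement>",
}


def rewrite_unstable_calls(source: str, mapping: dict) -> str:
    """Rewrite whole dotted names of unstable APIs in model source text."""
    for unstable, stable in mapping.items():
        # \b anchors keep partial identifier matches (names that merely contain
        # the unstable API name) from being rewritten.
        source = re.sub(rf"\b{re.escape(unstable)}\b", stable, source)
    return source

Wired into `unstable_to_stable(self, model)`, such a pass could rewrite the sample's `model.py` (or an FX graph dump) before `check_unstable_api` re-scans it for the disallowed name.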

graph_net/torch/test_compiler.py

Lines changed: 2 additions & 0 deletions

@@ -289,6 +289,8 @@ def test_single_model(args):
     if args.compiler == "xla":
         xla_model = get_model(args, "xla")
         compiled_model = compiler(xla_model)
+    elif args.compiler == "unstable_to_stable":
+        compiled_model = compiler(model, args.model_path)
     else:
         compiled_model = compiler(model)
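
The new branch passes `args.model_path` through so the backend can read the captured `model.py` directly instead of tracing the module. The standalone sketch below mirrors that substring check (it is not the backend class itself) and shows why the driver script further down refuses to run when `DISALLOWED_UNSTABLE_API` is unset: an empty pattern is contained in every file, so every model would be flagged.

import os
import sys


def scan_model_source(model_dir):
    """Plain-text scan of <model_dir>/model.py for a disallowed API name."""
    unstable_api = os.getenv("DISALLOWED_UNSTABLE_API", "").strip()
    if not unstable_api:
        # "" in text is always True and text.count("") == len(text) + 1,
        # so skip the scan rather than flagging every model.
        return
    with open(os.path.join(model_dir, "model.py"), "r", encoding="utf-8") as f:
        source = f.read()
    count = source.count(unstable_api)
    if count:
        print(f"❌ unstable_api: {unstable_api} occurs {count} times")
        sys.exit(-1)
    print(f"✅ Model passed: no occurrence of '{unstable_api}' found.")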

plot_unstable_to_stable.sh

Lines changed: 79 additions & 0 deletions

@@ -0,0 +1,79 @@
#!/bin/bash
# Batch-run the GraphNet benchmark for unstable_to_stable (_add_batch_dim):
# read model paths from a file list, run the compile test for each, and convert the log to JSON.
if [ -z "$DISALLOWED_UNSTABLE_API" ]; then
    echo "❌ Environment variable DISALLOWED_UNSTABLE_API is not set!"
    echo "Usage: export DISALLOWED_UNSTABLE_API=<target_unstable_api>"
    exit 1
fi

# === Configuration ===
root_dir="/root/GraphNet/todo_works/unstable_api_to_stable_api/${DISALLOWED_UNSTABLE_API}"
file_list="${root_dir}/${DISALLOWED_UNSTABLE_API}_files.txt"
log_file="${root_dir}/log.log"
json_output_dir="${root_dir}/JSON_results"

# Set the benchmark path environment variable
export GRAPH_NET_BENCHMARK_PATH="$root_dir"

# === Check the input file ===
if [ ! -f "$file_list" ]; then
    echo "❌ File not found: $file_list"
    exit 1
fi

# === Run the benchmark ===
echo "🚀 Starting benchmark..."
echo "Log will be written to: $log_file"
# echo "--------------------------------------" > "$log_file"

if [ -f "$log_file" ]; then
    echo "🧹 Removing old log file: $log_file"
    rm "$log_file"
fi

while IFS= read -r model_path; do
    [ -z "$model_path" ] && continue

    echo "▶️ Running model: $model_path"
    echo ">>> Running model: $model_path"

    python -m graph_net.torch.test_compiler \
        --model-path "/root/GraphNet/${model_path}/" \
        --compiler unstable_to_stable \
        >> "$log_file" 2>&1

    echo "✅ Done: $model_path"
    echo "--------------------------------------"
done < "$file_list"

echo "🎯 All models finished; log saved to: $log_file"

# === Convert the log to JSON ===
echo "📦 Converting log to JSON..."
if [ -d "$json_output_dir" ]; then
    echo "🧹 Removing old JSON output directory: $json_output_dir"
    rm -rf "$json_output_dir"
fi
mkdir -p "$json_output_dir"

python -m graph_net.log2json \
    --log-file "$log_file" \
    --output-dir "$json_output_dir"

if [ $? -eq 0 ]; then
    echo "✅ JSON files generated in: $json_output_dir"
else
    echo "⚠️ log2json failed; please check log.log"
fi

echo "📦 Converting JSON to result plots"
python -m graph_net.S_analysis \
    --benchmark-path "$GRAPH_NET_BENCHMARK_PATH/JSON_results/" \
    --output-dir "$GRAPH_NET_BENCHMARK_PATH"

if [ $? -eq 0 ]; then
    echo "✅ Result plots generated in: $GRAPH_NET_BENCHMARK_PATH"
else
    echo "❌ Failed to generate result plots"
fi
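
Each run's output is appended to `log.log` before `graph_net.log2json` and `graph_net.S_analysis` process it. For a quick look at the raw log before those steps, a small tally like the following (a hypothetical helper, not part of this commit) counts the ✅/❌ lines printed by `check_unstable_api`; adjust the path to match `$root_dir`.

from pathlib import Path

# Hypothetical path: $root_dir/log.log for DISALLOWED_UNSTABLE_API=_add_batch_dim.
log_path = Path("/root/GraphNet/todo_works/unstable_api_to_stable_api/_add_batch_dim/log.log")

passed = flagged = 0
for line in log_path.read_text(encoding="utf-8").splitlines():
    if line.startswith("✅ Model passed"):
        passed += 1
    elif line.startswith("❌") and "unstable_api" in line:
        flagged += 1
print(f"passed: {passed}, flagged: {flagged}")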
Lines changed: 10 additions & 0 deletions

@@ -0,0 +1,10 @@
samples/transformers-auto-model/AceInstruct-1.5B
samples/transformers-auto-model/AlphaMaze-v0.2-1.5B
samples/transformers-auto-model/Biggie-SmoLlm-0.4B
samples/transformers-auto-model/EXAONE-4.0-1.2B
samples/transformers-auto-model/SmolLM3-3B
samples/transformers-auto-model/ZR1-1.5B
samples/transformers-auto-model/gemma-3-1b-pt
samples/transformers-auto-model/jhu-clsp_ettin-decoder-150m
samples/transformers-auto-model/jinaai_jina-reranker-m0
samples/transformers-auto-model/openai_whisper-base
Lines changed: 64 additions & 0 deletions

@@ -0,0 +1,64 @@
samples/torchvision/convnext_base
samples/torchvision/convnext_large
samples/torchvision/convnext_small
samples/torchvision/convnext_tiny
samples/torchvision/efficientnet_b0
samples/torchvision/efficientnet_b1
samples/torchvision/efficientnet_b2
samples/torchvision/efficientnet_b3
samples/torchvision/efficientnet_b4
samples/torchvision/efficientnet_b5
samples/torchvision/efficientnet_b6
samples/torchvision/efficientnet_b7
samples/torchvision/efficientnet_v2_l
samples/torchvision/efficientnet_v2_m
samples/torchvision/efficientnet_v2_s
samples/torchvision/maxvit_t
samples/torchvision/swin_b
samples/torchvision/swin_v2_b
samples/torchvision/swin_v2_s
samples/torchvision/swin_v2_t
samples/transformers-auto-model/42dot_LLM-SFT-1.3B
samples/transformers-auto-model/AceInstruct-1.5B
samples/transformers-auto-model/AlphaMaze-v0.2-1.5B
samples/transformers-auto-model/Biggie-SmoLlm-0.4B
samples/transformers-auto-model/HuggingFaceTB/SmolLM3-3B
samples/transformers-auto-model/Intel_zoedepth-kitti
samples/transformers-auto-model/Intel_zoedepth-nyu
samples/transformers-auto-model/LFM2-350M
samples/transformers-auto-model/LLaMmlein_120M
samples/transformers-auto-model/NDugar_deberta-v2-xlarge-mnli
samples/transformers-auto-model/Qwen1.5-0.5B
samples/transformers-auto-model/Qwen2.5-0.5B
samples/transformers-auto-model/Qwen3-Embedding-0.6B
samples/transformers-auto-model/SmolLM3-3B
samples/transformers-auto-model/TinyLlama-1.1B-step-50K-105b
samples/transformers-auto-model/TinyLlama/TinyLlama-1.1B-Chat-v0.4
samples/transformers-auto-model/Tucano-2b4
samples/transformers-auto-model/ZR1-1.5B
samples/transformers-auto-model/baidu/ERNIE-4.5-0.3B-PT
samples/transformers-auto-model/deepseek-ai/deepseek-coder-1.3b-base
samples/transformers-auto-model/facebook_dpt-dinov2-giant-nyu
samples/transformers-auto-model/facebook_dpt-dinov2-small-kitti
samples/transformers-auto-model/gemma-3-1b-pt
samples/transformers-auto-model/google/gemma-1.1-2b-it
samples/transformers-auto-model/google/gemma-2b-it
samples/transformers-auto-model/google/gemma-3-1b-it
samples/transformers-auto-model/google/gemma-3-270m
samples/transformers-auto-model/hf-tiny-model-private_tiny-random-BlipModel
samples/transformers-auto-model/hf-tiny-model-private_tiny-random-CanineForSequenceClassification
samples/transformers-auto-model/hf-tiny-model-private_tiny-random-PerceiverForImageClassificationConvProcessing
samples/transformers-auto-model/hf-tiny-model-private_tiny-random-PerceiverForImageClassificationFourier
samples/transformers-auto-model/hf-tiny-model-private_tiny-random-PerceiverForImageClassificationLearned
samples/transformers-auto-model/microsoft/Phi-3-mini-4k-instruct
samples/transformers-auto-model/microsoft/Phi-3.5-mini-instruct
samples/transformers-auto-model/microsoft/Phi-4-mini-instruct
samples/transformers-auto-model/microsoft/phi-1
samples/transformers-auto-model/microsoft/phi-1_5
samples/transformers-auto-model/microsoft/phi-2
samples/transformers-auto-model/mooncakex_img2
samples/transformers-auto-model/nli-deberta-v3-base
samples/transformers-auto-model/nli-deberta-v3-small
samples/transformers-auto-model/nli-deberta-v3-xsmall
samples/transformers-auto-model/orca_mini_3b
samples/transformers-auto-model/sarvam-0.5
Lines changed: 10 additions & 0 deletions

@@ -0,0 +1,10 @@
samples/transformers-auto-model/AceInstruct-1.5B
samples/transformers-auto-model/AlphaMaze-v0.2-1.5B
samples/transformers-auto-model/Biggie-SmoLlm-0.4B
samples/transformers-auto-model/EXAONE-4.0-1.2B
samples/transformers-auto-model/SmolLM3-3B
samples/transformers-auto-model/ZR1-1.5B
samples/transformers-auto-model/gemma-3-1b-pt
samples/transformers-auto-model/jhu-clsp_ettin-decoder-150m
samples/transformers-auto-model/jinaai_jina-reranker-m0
samples/transformers-auto-model/openai_whisper-base
Lines changed: 13 additions & 0 deletions

@@ -0,0 +1,13 @@
samples/transformers-auto-model/EleutherAI_pythia-1b
samples/transformers-auto-model/HuggingFaceTB/SmolLM3-3B
samples/transformers-auto-model/TinyLlama/TinyLlama-1.1B-Chat-v0.4
samples/transformers-auto-model/baidu/ERNIE-4.5-0.3B-PT
samples/transformers-auto-model/deepseek-ai/deepseek-coder-1.3b-base
samples/transformers-auto-model/google/gemma-1.1-2b-it
samples/transformers-auto-model/google/gemma-2b-it
samples/transformers-auto-model/google/gemma-3-1b-it
samples/transformers-auto-model/google/gemma-3-270m
samples/transformers-auto-model/microsoft/Phi-3-mini-4k-instruct
samples/transformers-auto-model/microsoft/phi-1
samples/transformers-auto-model/microsoft/phi-1_5
samples/transformers-auto-model/microsoft/phi-2
Lines changed: 10 additions & 0 deletions

@@ -0,0 +1,10 @@
samples/transformers-auto-model/AceInstruct-1.5B
samples/transformers-auto-model/AlphaMaze-v0.2-1.5B
samples/transformers-auto-model/Biggie-SmoLlm-0.4B
samples/transformers-auto-model/EXAONE-4.0-1.2B
samples/transformers-auto-model/SmolLM3-3B
samples/transformers-auto-model/ZR1-1.5B
samples/transformers-auto-model/gemma-3-1b-pt
samples/transformers-auto-model/jhu-clsp_ettin-decoder-150m
samples/transformers-auto-model/jinaai_jina-reranker-m0
samples/transformers-auto-model/openai_whisper-base
Lines changed: 10 additions & 0 deletions

@@ -0,0 +1,10 @@
samples/transformers-auto-model/AceInstruct-1.5B
samples/transformers-auto-model/AlphaMaze-v0.2-1.5B
samples/transformers-auto-model/Biggie-SmoLlm-0.4B
samples/transformers-auto-model/EXAONE-4.0-1.2B
samples/transformers-auto-model/SmolLM3-3B
samples/transformers-auto-model/ZR1-1.5B
samples/transformers-auto-model/gemma-3-1b-pt
samples/transformers-auto-model/jhu-clsp_ettin-decoder-150m
samples/transformers-auto-model/jinaai_jina-reranker-m0
samples/transformers-auto-model/openai_whisper-base
