Skip to content

Commit 3245cf9

Browse files
committed
Update
1 parent a5a172a commit 3245cf9

File tree

2 files changed

+65
-44
lines changed

2 files changed

+65
-44
lines changed

graph_net/analysis.py

Lines changed: 29 additions & 29 deletions
Original file line numberDiff line numberDiff line change
@@ -66,10 +66,10 @@ def analysis(args):
6666
data = {"Compiler": [], "log2(speedup)": []}
6767

6868
# A: CINN (Simulate)
69-
data["log2(speedup)"].extend(
70-
np.random.normal(loc=0.35, scale=0.2, size=num_samples_per_compiler)
71-
)
72-
data["Compiler"].extend(["CINN"] * num_samples_per_compiler)
69+
# data["log2(speedup)"].extend(
70+
# np.random.normal(loc=0.35, scale=0.2, size=num_samples_per_compiler)
71+
# )
72+
# data["Compiler"].extend(["CINN"] * num_samples_per_compiler)
7373

7474
# B: torch.inductor
7575
# inductor_log = os.path.join(args.test_compiler_log_file)
@@ -80,37 +80,37 @@ def analysis(args):
8080
data["Compiler"].extend(["torch.inductor"] * len(log2_speedups))
8181

8282
# C: tvm (Simulate)
83-
data["log2(speedup)"].extend(
84-
np.random.normal(loc=0.3, scale=0.15, size=num_samples_per_compiler)
85-
)
86-
data["Compiler"].extend(["tvm"] * num_samples_per_compiler)
83+
# data["log2(speedup)"].extend(
84+
# np.random.normal(loc=0.3, scale=0.15, size=num_samples_per_compiler)
85+
# )
86+
# data["Compiler"].extend(["tvm"] * num_samples_per_compiler)
8787

8888
# D: XLA (Simulate)
89-
data["log2(speedup)"].extend(
90-
np.concatenate(
91-
[
92-
np.random.normal(
93-
loc=-0.5, scale=0.1, size=int(num_samples_per_compiler * 0.6)
94-
),
95-
np.random.normal(
96-
loc=0.2, scale=0.2, size=int(num_samples_per_compiler * 0.4)
97-
),
98-
]
99-
)
100-
)
101-
data["Compiler"].extend(["XLA"] * num_samples_per_compiler)
89+
# data["log2(speedup)"].extend(
90+
# np.concatenate(
91+
# [
92+
# np.random.normal(
93+
# loc=-0.5, scale=0.1, size=int(num_samples_per_compiler * 0.6)
94+
# ),
95+
# np.random.normal(
96+
# loc=0.2, scale=0.2, size=int(num_samples_per_compiler * 0.4)
97+
# ),
98+
# ]
99+
# )
100+
# )
101+
# data["Compiler"].extend(["XLA"] * num_samples_per_compiler)
102102

103103
# E: TensorRT (Simulate)
104-
data["log2(speedup)"].extend(
105-
np.random.normal(loc=0.5, scale=0.1, size=num_samples_per_compiler)
106-
)
107-
data["Compiler"].extend(["TensorRT"] * num_samples_per_compiler)
104+
# data["log2(speedup)"].extend(
105+
# np.random.normal(loc=0.5, scale=0.1, size=num_samples_per_compiler)
106+
# )
107+
# data["Compiler"].extend(["TensorRT"] * num_samples_per_compiler)
108108

109109
# F: BladeDISC (Simulate)
110-
data["log2(speedup)"].extend(
111-
np.random.normal(loc=0.05, scale=0.3, size=num_samples_per_compiler)
112-
)
113-
data["Compiler"].extend(["BladeDISC"] * num_samples_per_compiler)
110+
# data["log2(speedup)"].extend(
111+
# np.random.normal(loc=0.05, scale=0.3, size=num_samples_per_compiler)
112+
# )
113+
# data["Compiler"].extend(["BladeDISC"] * num_samples_per_compiler)
114114

115115
df = pd.DataFrame(data)
116116
df["Compiler"] = pd.Categorical(df["Compiler"], categories=compilers, ordered=True)

graph_net/benchmark_demo.sh

Lines changed: 36 additions & 15 deletions
Original file line numberDiff line numberDiff line change
@@ -1,20 +1,41 @@
11
#!/bin/bash
2+
benchmark_dir="/work/GraphNet/benchmark_logs"
3+
mkdir -p "${benchmark_dir}"
24

3-
for i in /work/GraphNet/samples/torchvision/*/; do
4-
dir_name=$(basename "${i%/}")
5-
6-
echo "Processing ${dir_name}"
7-
8-
python -m graph_net.torch.test_compiler \
9-
--model-path "${i}" \
10-
--compiler "inductor" \
11-
--warmup 3 \
12-
--trials 10 \
13-
--device cuda \
14-
--output-dir "/work/GraphNet/benchmark_logs"
15-
16-
done > torchvision_cuda.log 2>&1
5+
global_log="${benchmark_dir}/global_0830.log"
6+
> "$global_log"
7+
8+
echo "[$(date)] Script started in background (PID: $$)" | tee -a "$global_log"
9+
{
10+
valid_packages=("timm" "torchaudio" "torchgeometric" "torchvision" "transformers-auto-model" "ultralytics")
11+
for i in /work/GraphNet/samples/*/; do
12+
package_name=$(basename "${i%/}")
13+
if [[ " ${valid_packages[*]} " == *" ${package_name} "* ]]; then
14+
echo "[$(date)] Processing package: ${package_name}" | tee -a "$global_log"
15+
for j in "$i"*/; do
16+
model_name=$(basename "${j%/}")
17+
18+
echo "Processing model: ${model_name}"
19+
20+
python -m graph_net.torch.test_compiler \
21+
--model-path "${j}" \
22+
--compiler "inductor" \
23+
--warmup 3 \
24+
--trials 10 \
25+
--device cuda \
26+
--output-dir "${benchmark_dir}/${package_name}"
27+
28+
done
29+
else
30+
echo "[$(date)] Skipping package (not in valid list): ${package_name}" | tee -a "$global_log"
31+
fi
32+
done
33+
echo "[$(date)] Script completed" | tee -a "$global_log"
34+
} >> "$global_log" 2>&1
35+
36+
37+
# nohup bash /work/GraphNet/graph_net/benchmark_demo.sh > /dev/null 2>&1 &
1738

1839
# python3 -m graph_net.analysis --test-compiler-log-file /work/GraphNet/torchvision_cuda.log
1940

20-
python3 -m graph_net.analysis --benchmark-path /work/GraphNet/benchmark
41+
# python3 -m graph_net.analysis --benchmark-path "${benchmark_dir}"

0 commit comments

Comments
 (0)