-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathmain.py
More file actions
35 lines (27 loc) · 1.12 KB
/
main.py
File metadata and controls
35 lines (27 loc) · 1.12 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
import argparse
import sys
from src.engine import InferenceEngine
from src.monitor import print_system_stats
def main():
    """Benchmark FP16 vs INT4 inference for a model and report the VRAM savings.

    Exits with status 1 if the benchmark fails (e.g. no CUDA GPU available),
    so calling scripts/CI can detect the failure.
    """
    parser = argparse.ArgumentParser(
        description="Compare FP16 and INT4 inference throughput and VRAM usage."
    )
    parser.add_argument(
        "--model",
        type=str,
        default="TinyLlama/TinyLlama-1.1B-Chat-v1.0",
        help="Model identifier to benchmark.",
    )
    args = parser.parse_args()

    # Print host/GPU stats up front so environment problems are easy to diagnose.
    print_system_stats()

    try:
        engine = InferenceEngine(args.model)

        # Benchmark for FP16 (baseline precision).
        engine.load_model("fp16")
        res_fp16 = engine.run_benchmark()
        print(f"FP16: {res_fp16['tps']:.2f} tok/s | VRAM: {res_fp16['vram_gb']:.2f} GB")

        # Benchmark for INT4 (quantized).
        engine.load_model("int4")
        res_int4 = engine.run_benchmark()
        print(f"INT4: {res_int4['tps']:.2f} tok/s | VRAM: {res_int4['vram_gb']:.2f} GB")

        # Summary: VRAM saved by quantization.
        savings = res_fp16['vram_gb'] - res_int4['vram_gb']
        print(f"\n[SUCCESS] Quantization saved {savings:.2f} GB VRAM")
    except Exception as e:
        # Top-level boundary: report the failure, then exit nonzero.
        # (Previously the script fell through and exited 0 even on failure.)
        print(f"\n[ERROR] Failed to run benchmark: {e}")
        print("Note: If running locally on Mac, this is expected. Run on Colab GPU.")
        sys.exit(1)
# Script entry point: run the benchmark only when executed directly, not on import.
if __name__ == "__main__":
    main()