-
Notifications
You must be signed in to change notification settings - Fork 1
Expand file tree
/
Copy pathdemo_omni.py
More file actions
346 lines (264 loc) · 10.1 KB
/
demo_omni.py
File metadata and controls
346 lines (264 loc) · 10.1 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
XERV CRAYON V5.1.0 - OMNI-BACKEND DEMONSTRATION
================================================
This script demonstrates the "Smashing Experience" of Crayon's Omni-Backend.
It showcases:
1. Automatic hardware detection (Auto-Pilot Mode)
2. Manual device override
3. Profile hot-swapping
4. Latency and throughput benchmarks
Usage:
python demo_omni.py
The script will automatically detect your hardware and run appropriate tests.
"""
import io
import os
import sys
import time

# Windows consoles default to a legacy code page; rewrap the standard
# streams as UTF-8 so the demo's emoji output does not crash.
if sys.platform == "win32":
    try:
        sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf-8', errors='replace')
        sys.stderr = io.TextIOWrapper(sys.stderr.buffer, encoding='utf-8', errors='replace')
    except Exception:
        pass  # Best effort only: fall back to the default encoding.

# Make a source checkout importable without installation.
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "src"))

from crayon import CrayonVocab, check_backends, get_version, enable_verbose_logging
def print_banner():
    """Render the framed demo title line, including the library version."""
    rule = "=" * 70
    print(rule)
    print("ποΈ XERV CRAYON V{} - OMNI-BACKEND DEMO".format(get_version()))
    print(rule)
    print()
def demo_auto_mode():
    """
    AUTO MODE: The "It Just Works" Experience

    Crayon automatically detects your hardware and selects the best backend:
    - NVIDIA GPU -> CUDA engine (parallel kernel execution)
    - AMD GPU    -> ROCm engine (HIP kernel execution)
    - Otherwise  -> CPU engine (AVX2/AVX-512 SIMD)

    Returns:
        The auto-detected CrayonVocab with the 'lite' profile loaded;
        callers are responsible for closing it.
    """
    print("1οΈβ£ INITIALIZING IN AUTO MODE...")
    print("-" * 50)
    # Enable logging so the device-detection steps are visible on stdout.
    enable_verbose_logging()
    # Create vocab with auto-detection.
    vocab = CrayonVocab(device="auto")
    info = vocab.get_info()
    print("\n π Detection Results:")
    print(f" ββ Device: {info['device'].upper()}")
    print(f" ββ Backend: {info['backend']}")
    print(f" ββ State: {info['device_state']}")
    if 'hardware' in info:
        print(f" ββ Hardware: {info['hardware'].get('name', 'Unknown')}")
        if info['hardware'].get('vram_mb'):
            print(f" ββ VRAM: {info['hardware']['vram_mb']} MB")
    # Show every backend that is compiled/available in this build.
    backends = check_backends()
    available = [k for k, v in backends.items() if v]
    print(f"\n π Available Backends: {', '.join(available)}")
    # Load the default profile.
    print("\n π¦ Loading 'lite' profile...")
    vocab.load_profile("lite")
    # FIX: this print was split across two physical lines by a bad
    # encoding pass (a multi-byte emoji broke mid-character); rejoined.
    print(f" β Profile loaded ({vocab.vocab_size} tokens)")
    return vocab
def demo_latency_test(vocab):
    """
    LATENCY TEST: The "Instant" Feel

    Measures single-string tokenization performance.
    CPU mode is optimized for latency with minimal overhead.

    Returns the token list produced for the sample sentence.
    """
    print("\n")
    print("2οΈβ£ LATENCY TEST (Single String)")
    print("-" * 50)
    sample = "Crayon optimizes tokenization at the silicon level."
    # Warm-up pass: primes JIT paths and caches before timing.
    for _ in range(100):
        vocab.tokenize(sample)
    # Timed run.
    n_iters = 10000
    t0 = time.perf_counter()
    for _ in range(n_iters):
        result = vocab.tokenize(sample)
    elapsed = time.perf_counter() - t0
    mean_us = (elapsed / n_iters) * 1_000_000
    print(f"\n π Input: '{sample}'")
    print(f" π’ Tokens: {result}")
    print(f" π Token Count: {len(result)}")
    print(f" β‘ Average Latency: {mean_us:.2f} Β΅s/call")
    print(f" π Iterations: {n_iters:,}")
    return result
def demo_profile_hotswap(vocab):
    """
    PROFILE HOT-SWAP: The Context Manager

    Demonstrates switching vocabulary profiles on-the-fly.
    Useful when processing mixed content.
    """
    print("\n")
    print("3οΈβ£ CONTEXT SWITCHING (Profile Hot-Swap)")
    print("-" * 50)
    snippet = "def forward(self, x): return torch.matmul(x, w)"
    print(f"\n π Code: '{snippet}'")
    # First tokenize under whatever profile is currently active (lite).
    print("\n [LITE Profile] Tokenizing code...")
    lite_tokens = vocab.tokenize(snippet)
    print(f" ββ Result: {len(lite_tokens)} tokens")
    # Temporarily swap to 'standard'; the context manager restores the
    # previous profile on exit.
    print("\n [STANDARD Profile] Switching context...")
    with vocab.using_profile("standard"):
        std_tokens = vocab.tokenize(snippet)
        print(f" ββ Result: {len(std_tokens)} tokens")
    print("\n π Automatically reverted to 'lite' profile")
    # Confirm the revert actually happened.
    state = vocab.get_info()
    print(f" ββ Current: {state.get('active_profile', 'unknown')}")
def demo_batch_throughput(vocab):
    """
    BATCH THROUGHPUT: The Parallel Processing Power

    Measures batch tokenization performance.
    GPU mode excels here with parallel kernel execution.
    """
    print("\n")
    print("4οΈβ£ BATCH THROUGHPUT TEST")
    print("-" * 50)
    sentence = "The quick brown fox jumps over the lazy dog."
    for size in (100, 1000, 10000):
        docs = [sentence] * size
        # Warm-up on a small slice so the timed run is steady-state.
        vocab.tokenize(docs[:10])
        # Timed run.
        t0 = time.time()
        token_lists = vocab.tokenize(docs)
        elapsed = time.time() - t0
        n_tokens = sum(len(tl) for tl in token_lists)
        docs_per_sec = size / elapsed
        tokens_per_sec = n_tokens / elapsed
        print(f"\n π¦ Batch Size: {size:,}")
        print(f" β±οΈ Duration: {elapsed:.4f}s")
        print(f" π Throughput: {docs_per_sec:,.0f} docs/sec")
        print(f" π Token Rate: {tokens_per_sec:,.0f} tokens/sec")
def demo_gpu_smashing(vocab):
    """
    GPU SMASHING: The High-Throughput Experience

    If running on GPU, demonstrates the massive parallelism available:
    100K+ documents processed in seconds. On CPU the stress test is
    skipped with a hint, since the point is the GPU kernel path.

    Args:
        vocab: An initialized CrayonVocab; its ``device`` attribute
            decides whether the stress test runs.
    """
    print("\n")
    print("5οΈβ£ GPU SMASH TEST")
    print("-" * 50)
    # Guard clause: nothing to smash without a GPU backend.
    if vocab.device == "cpu":
        print("\n βΉοΈ Running in CPU Mode - Skipping GPU stress test")
        print(" π‘ To enable: Run on a machine with NVIDIA/AMD GPU")
        return
    # Massive batch — large enough that kernel launch overhead is amortized.
    batch_size = 100_000
    base_text = "The quick brown fox jumps over the lazy dog."
    print(f"\n π§ Generating {batch_size:,} documents...")
    batch = [base_text] * batch_size
    print(" π Launching GPU kernel...")
    start = time.time()
    results = vocab.tokenize(batch)
    duration = time.time() - start
    total_tokens = sum(len(r) for r in results)
    throughput = batch_size / duration
    tokens_per_sec = total_tokens / duration
    # FIX: this print was split across two physical lines by a bad
    # encoding pass (a multi-byte emoji broke mid-character); rejoined.
    print(f"\n β Processed {batch_size:,} documents in {duration:.4f}s")
    print(f" π₯ Document Throughput: {throughput:,.0f} docs/sec")
    print(f" π Token Throughput: {tokens_per_sec:,.0f} tokens/sec")
def demo_encode_decode(vocab):
    """
    ENCODE/DECODE: Round-Trip Verification

    Demonstrates the decode() functionality for debugging
    and understanding tokenization behavior.

    Args:
        vocab: An initialized CrayonVocab. ``decode`` may raise
            RuntimeError when its optional assets are unavailable;
            that case is reported instead of propagated.
    """
    print("\n")
    print("6οΈβ£ ENCODE/DECODE ROUND-TRIP")
    print("-" * 50)
    test_text = "Hello, Crayon! Testing the tokenizer."
    print(f"\n π Original: '{test_text}'")
    # Encode.
    tokens = vocab.tokenize(test_text)
    print(f" π’ Tokens: {tokens}")
    # Decode — keep the try body minimal: only decode() can raise here.
    try:
        decoded = vocab.decode(tokens)
    except RuntimeError as e:
        print(f" β οΈ Decode unavailable: {e}")
        return
    print(f" π€ Decoded: '{decoded}'")
    if decoded == test_text:
        # FIX: this print was split across two physical lines by a bad
        # encoding pass (a multi-byte emoji broke mid-character); rejoined.
        print(" β Perfect round-trip!")
    else:
        print(" β οΈ Minor differences (expected with subword tokenization)")
def demo_device_override():
    """
    MANUAL OVERRIDE: Total Control

    Demonstrates explicitly selecting a device for specific use cases.
    """
    print("\n")
    print("7οΈβ£ MANUAL DEVICE OVERRIDE")
    print("-" * 50)
    backends = check_backends()
    print(f"\n π Available: {backends}")
    # Force a CPU-only engine regardless of detected hardware.
    print("\n π΅ Creating CPU-only instance...")
    cpu_vocab = CrayonVocab(device="cpu")
    cpu_vocab.load_profile("lite")
    info = cpu_vocab.get_info()
    print(f" ββ Device: {info['device']}")
    print(f" ββ Backend: {info['backend']}")
    # Quick latency sanity check on the forced-CPU engine.
    probe = "Quick CPU test"
    t0 = time.perf_counter()
    for _ in range(1000):
        cpu_vocab.tokenize(probe)
    mean_us = ((time.perf_counter() - t0) / 1000) * 1_000_000
    print(f" ββ Latency: {mean_us:.2f} Β΅s/call")
    cpu_vocab.close()
    # Exercise each GPU backend only when its runtime is reported available.
    gpu_targets = (
        ("cuda", "π’ Creating CUDA instance..."),
        ("rocm", "π΄ Creating ROCm instance..."),
    )
    for device_name, banner in gpu_targets:
        if not backends.get(device_name):
            continue
        print(f"\n {banner}")
        gpu_vocab = CrayonVocab(device=device_name)
        gpu_vocab.load_profile("lite")
        info = gpu_vocab.get_info()
        print(f" ββ Device: {info['device']}")
        gpu_vocab.close()
def main():
    """Run the complete demo.

    Executes every demo section in order and reports failures with a full
    traceback so the script doubles as a smoke test.

    Returns:
        0 on success, 1 if any section raised.
    """
    print_banner()
    try:
        # Main demos share a single auto-detected vocab instance.
        vocab = demo_auto_mode()
        demo_latency_test(vocab)
        demo_profile_hotswap(vocab)
        demo_batch_throughput(vocab)
        demo_gpu_smashing(vocab)
        demo_encode_decode(vocab)
        # Release the engine before the override demo creates fresh instances.
        vocab.close()
        # Device override demo manages its own instances.
        demo_device_override()
        print("\n")
        print("=" * 70)
        # FIX: this print was split across two physical lines by a bad
        # encoding pass (a multi-byte emoji broke mid-character); rejoined.
        print("β ALL DEMOS COMPLETED SUCCESSFULLY!")
        print("=" * 70)
    except Exception as e:
        # Broad catch is intentional at this top-level boundary: report
        # the failure, print the traceback, and exit non-zero.
        print(f"\nβ Demo failed: {e}")
        import traceback
        traceback.print_exc()
        return 1
    return 0
# Script entry point: propagate main()'s status code to the shell.
if __name__ == "__main__":
    sys.exit(main())