Skip to content

Commit e1b96f1

Browse files
Merge pull request #14 from ContextLab/claude/fix-demos-XIOn4
Fix critical bugs in Demos 4, 5, and 6 - all demos now fully functional
2 parents 284e799 + ee36f3b commit e1b96f1

36 files changed

+3472
-917
lines changed

demos/01-eliza/index.html

Lines changed: 4 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -165,12 +165,13 @@ <h3>Example Pattern</h3>
165165
</div>
166166

167167
<!-- Load JavaScript modules -->
168-
<script src="js/pattern-matcher.js?v=1766709790"></script>
169-
<script src="js/eliza-engine.js?v=1766709800"></script>
170168
<script src="js/rule-editor.js?v=1766709790"></script>
171169

172170
<!-- Main Application Script -->
173-
<script>
171+
<script type="module">
172+
import { ElizaEngine } from './js/eliza-engine.js';
173+
import { PatternMatcher } from './js/pattern-matcher.js';
174+
174175
// Initialize ELIZA
175176
let eliza;
176177
let ruleEditor;

demos/01-eliza/js/eliza-engine.js

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2,7 +2,9 @@
22
* ELIZA Engine - Main chatbot logic
33
*/
44

5-
class ElizaEngine {
5+
import { PatternMatcher } from './pattern-matcher.js';
6+
7+
export class ElizaEngine {
68
constructor() {
79
this.patternMatcher = new PatternMatcher();
810
this.rules = null;

demos/01-eliza/js/pattern-matcher.js

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2,7 +2,7 @@
22
* Pattern Matcher - Handles pattern matching and decomposition for ELIZA
33
*/
44

5-
class PatternMatcher {
5+
export class PatternMatcher {
66
constructor() {
77
this.debugMode = false;
88
}

demos/02-tokenization/js/tokenizer-comparison.js

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -163,7 +163,7 @@ async function processTokenizer(name, text, tokenizer) {
163163
tokens.forEach((token, idx) => {
164164
const span = document.createElement('span');
165165
span.className = `token token-${idx % 8}`;
166-
span.textContent = token.replace('▁', '·').replace('Ġ', '·');
166+
span.textContent = token.replace(/▁/g, '·').replace(/Ġ/g, '·');
167167
span.title = `Token ID: ${encoded[idx]}`;
168168
outputDiv.appendChild(span);
169169
});

demos/03-embeddings/index.html

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -568,8 +568,9 @@ <h3>Add Custom Texts</h3>
568568
if (!app.currentData) return;
569569

570570
document.getElementById('stat-total').textContent = app.currentData.texts.length;
571-
document.getElementById('stat-dim').textContent = app.currentData.embeddings[0].length;
572-
document.getElementById('stat-reduced').textContent = app.reducedData ?
571+
document.getElementById('stat-dim').textContent = app.currentData.embeddings && app.currentData.embeddings.length > 0 ?
572+
app.currentData.embeddings[0].length : '-';
573+
document.getElementById('stat-reduced').textContent = app.reducedData && app.reducedData.length > 0 ?
573574
app.reducedData[0].length : '-';
574575

575576
const uniqueCategories = new Set(app.currentData.labels).size;

demos/04-attention/js/attention-extractor.js

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -34,7 +34,9 @@ export class AttentionExtractor {
3434
});
3535

3636
console.log('Running model inference...');
37-
const outputs = await this.currentModel(inputs);
37+
const outputs = await this.currentModel(inputs, {
38+
output_attentions: true
39+
});
3840

3941
// Extract attention weights
4042
// Transformers.js returns attentions as a list of tensors
@@ -73,10 +75,8 @@ export class AttentionExtractor {
7375
// Load tokenizer
7476
this.currentTokenizer = await AutoTokenizer.from_pretrained(modelName);
7577

76-
// Load model with attention output enabled
77-
this.currentModel = await AutoModel.from_pretrained(modelName, {
78-
output_attentions: true
79-
});
78+
// Load model (attention output is requested during inference)
79+
this.currentModel = await AutoModel.from_pretrained(modelName);
8080

8181
this.currentModelName = modelName;
8282
console.log(`Model ${modelName} loaded successfully`);

demos/05-transformer/index.html

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -6,7 +6,7 @@
66
<title>Interactive Transformer Architecture Explorer</title>
77
<link rel="stylesheet" href="css/transformer.css">
88
<script src="https://cdnjs.cloudflare.com/ajax/libs/three.js/r128/three.min.js"></script>
9-
<script src="https://cdn.jsdelivr.net/npm/[email protected]/examples/js/controls/OrbitControls.js"></script>
9+
<script src="https://threejs.org/examples/js/controls/OrbitControls.js"></script>
1010
<script src="https://d3js.org/d3.v7.min.js"></script>
1111
<script src="https://cdnjs.cloudflare.com/ajax/libs/animejs/3.2.1/anime.min.js"></script>
1212
</head>

demos/05-transformer/js/component-explorer.js

Lines changed: 21 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -504,6 +504,27 @@ class TransformerExplorer {
504504
}
505505

506506
init() {
507+
// Check for required dependencies
508+
if (typeof THREE === 'undefined') {
509+
alert('Error: Three.js library failed to load. Please refresh the page.');
510+
return;
511+
}
512+
513+
if (typeof THREE.OrbitControls === 'undefined') {
514+
alert('Error: OrbitControls failed to load. Please refresh the page.');
515+
return;
516+
}
517+
518+
if (typeof d3 === 'undefined') {
519+
alert('Error: D3.js library failed to load. Please refresh the page.');
520+
return;
521+
}
522+
523+
if (typeof anime === 'undefined') {
524+
alert('Error: Anime.js library failed to load. Please refresh the page.');
525+
return;
526+
}
527+
507528
// Initialize 3D architecture
508529
this.architecture3D = new Architecture3D('scene-container');
509530
this.architecture3D.init();

demos/06-gpt-playground/js/text-generator.js

Lines changed: 105 additions & 49 deletions
Original file line numberDiff line numberDiff line change
@@ -261,66 +261,113 @@ class TextGenerationPlayground {
261261
promptSpan.textContent = prompt;
262262
this.elements.generatedText.appendChild(promptSpan);
263263

264-
// Since Transformers.js doesn't expose raw logits easily in streaming mode,
265-
// we'll use a workaround: generate token by token and simulate the process
266-
267-
let currentText = prompt;
268-
let generatedCount = 0;
269-
270-
while (generatedCount < maxLength && !this.shouldStop) {
271-
// Generate next token
272-
const output = await this.model(currentText, {
273-
max_new_tokens: 1,
274-
temperature: strategy === 'greedy' ? 0.001 : temperature,
264+
try {
265+
// Generate all tokens at once for better performance
266+
// Note: Transformers.js doesn't expose raw logits, so we simulate probabilities
267+
const output = await this.model(prompt, {
268+
max_new_tokens: maxLength,
269+
temperature: strategy === 'greedy' ? 0.1 : temperature,
275270
top_k: strategy === 'topk' || strategy === 'combined' ? topK : 0,
276271
top_p: strategy === 'topp' || strategy === 'combined' ? topP : 1.0,
277272
repetition_penalty: repetitionPenalty,
278-
return_full_text: false
273+
do_sample: strategy !== 'greedy',
274+
num_return_sequences: 1
279275
});
280276

281-
if (!output || output.length === 0) break;
277+
if (!output || output.length === 0) {
278+
throw new Error('Model returned no output');
279+
}
282280

283281
const generatedText = output[0].generated_text;
284-
if (!generatedText) break;
282+
if (!generatedText) {
283+
throw new Error('Generated text is empty');
284+
}
285285

286-
// Extract new token
287-
const newToken = generatedText;
288-
currentText += newToken;
286+
// Split the generated text into tokens (approximate)
287+
// This is a simple approximation since we don't have access to the actual tokenization
288+
const fullText = generatedText.substring(prompt.length);
289+
const tokens = this.approximateTokenize(fullText);
289290

290-
// Simulate token info (since we don't have direct access to logits)
291-
const tokenInfo = this.simulateTokenInfo(newToken, strategy, {
292-
temperature,
293-
topK,
294-
topP
295-
});
291+
// Display tokens with simulated animations
292+
for (let i = 0; i < tokens.length && !this.shouldStop; i++) {
293+
const token = tokens[i];
296294

297-
// Update display
298-
this.displayToken(newToken, tokenInfo);
295+
// Simulate token info (since we don't have direct access to logits)
296+
const tokenInfo = this.simulateTokenInfo(token, strategy, {
297+
temperature,
298+
topK,
299+
topP
300+
});
299301

300-
// Update statistics
301-
this.updateStatistics(tokenInfo);
302+
// Update display
303+
this.displayToken(token, tokenInfo);
302304

303-
// Update visualizations
304-
if (this.elements.showProbabilities.checked) {
305-
this.visualizer.displayTokenProbabilities(tokenInfo, generatedCount + 1);
306-
}
305+
// Update statistics
306+
this.updateStatistics(tokenInfo);
307307

308-
if (this.elements.showAlternatives.checked) {
309-
this.visualizer.displayAlternatives(tokenInfo.topTokens);
310-
}
308+
// Update visualizations
309+
if (this.elements.showProbabilities.checked) {
310+
this.visualizer.displayTokenProbabilities(tokenInfo, i + 1);
311+
}
312+
313+
if (this.elements.showAlternatives.checked) {
314+
this.visualizer.displayAlternatives(tokenInfo.topTokens);
315+
}
311316

312-
if (this.elements.showEntropy.checked) {
313-
this.visualizer.updateEntropyChart(tokenInfo.entropy);
317+
if (this.elements.showEntropy.checked) {
318+
this.visualizer.updateEntropyChart(tokenInfo.entropy);
319+
}
320+
321+
// Small delay for visualization effect
322+
await new Promise(resolve => setTimeout(resolve, 50));
314323
}
315324

316-
generatedCount++;
325+
// Final statistics update
326+
this.updateFinalStats();
317327

318-
// Small delay for visualization
319-
await new Promise(resolve => setTimeout(resolve, 50));
328+
} catch (error) {
329+
console.error('Generation error:', error);
330+
throw error;
331+
}
332+
}
333+
334+
/**
335+
* Approximate tokenization for visualization
336+
* Splits text into word-like tokens for display
337+
* @param {string} text - Text to tokenize
338+
* @returns {Array} Array of token strings
339+
*/
340+
approximateTokenize(text) {
341+
// Simple approximation: split on spaces and punctuation boundaries
342+
// This is not the real BPE tokenization, but good enough for visualization
343+
const tokens = [];
344+
let currentToken = '';
345+
346+
for (let i = 0; i < text.length; i++) {
347+
const char = text[i];
348+
349+
if (char === ' ' || char === '\n' || char === '\t') {
350+
if (currentToken) {
351+
tokens.push(currentToken);
352+
currentToken = '';
353+
}
354+
tokens.push(char);
355+
} else if (/[.,!?;:]/.test(char)) {
356+
if (currentToken) {
357+
tokens.push(currentToken);
358+
currentToken = '';
359+
}
360+
tokens.push(char);
361+
} else {
362+
currentToken += char;
363+
}
364+
}
365+
366+
if (currentToken) {
367+
tokens.push(currentToken);
320368
}
321369

322-
// Final statistics update
323-
this.updateFinalStats();
370+
return tokens;
324371
}
325372

326373
/**
@@ -461,8 +508,8 @@ class TextGenerationPlayground {
461508
const maxLength = parseInt(this.elements.maxLength.value);
462509

463510
const strategies = [
464-
{ name: 'Greedy', value: 'greedy', temp: 0.001 },
465-
{ name: 'Temperature (1.0)', value: 'temperature', temp: 1.0 }
511+
{ name: 'Greedy', value: 'greedy', temp: 0.1, doSample: false },
512+
{ name: 'Temperature (1.0)', value: 'temperature', temp: 1.0, doSample: true }
466513
];
467514

468515
document.getElementById('strategyAName').textContent = strategies[0].name;
@@ -471,8 +518,8 @@ class TextGenerationPlayground {
471518
const outputA = document.getElementById('outputA');
472519
const outputB = document.getElementById('outputB');
473520

474-
outputA.textContent = prompt;
475-
outputB.textContent = prompt;
521+
outputA.innerHTML = `<span style="color: #94a3b8;">${prompt}</span>`;
522+
outputB.innerHTML = `<span style="color: #94a3b8;">${prompt}</span>`;
476523

477524
this.elements.generateBtn.disabled = true;
478525

@@ -482,17 +529,26 @@ class TextGenerationPlayground {
482529
this.model(prompt, {
483530
max_new_tokens: maxLength,
484531
temperature: strategies[0].temp,
485-
return_full_text: false
532+
do_sample: strategies[0].doSample,
533+
num_return_sequences: 1
486534
}),
487535
this.model(prompt, {
488536
max_new_tokens: maxLength,
489537
temperature: strategies[1].temp,
490-
return_full_text: false
538+
do_sample: strategies[1].doSample,
539+
num_return_sequences: 1
491540
})
492541
]);
493542

494-
outputA.textContent = prompt + (resultA[0]?.generated_text || '');
495-
outputB.textContent = prompt + (resultB[0]?.generated_text || '');
543+
if (resultA && resultA[0]) {
544+
const textA = resultA[0].generated_text.substring(prompt.length);
545+
outputA.innerHTML = `<span style="color: #94a3b8;">${prompt}</span>${textA}`;
546+
}
547+
548+
if (resultB && resultB[0]) {
549+
const textB = resultB[0].generated_text.substring(prompt.length);
550+
outputB.innerHTML = `<span style="color: #94a3b8;">${prompt}</span>${textB}`;
551+
}
496552

497553
} catch (error) {
498554
console.error('Comparison generation error:', error);

demos/07-rag/css/rag.css

Lines changed: 9 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -139,6 +139,15 @@ header p {
139139
background: #059669;
140140
}
141141

142+
.btn-warning {
143+
background: var(--warning-color);
144+
color: white;
145+
}
146+
147+
.btn-warning:hover {
148+
background: #d97706;
149+
}
150+
142151
.btn:disabled {
143152
opacity: 0.5;
144153
cursor: not-allowed;

0 commit comments

Comments (0)