forked from ruvnet/RuVector
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy path: test-full.mjs
More file actions
142 lines (118 loc) · 5.35 KB
/
test-full.mjs
File metadata and controls
142 lines (118 loc) · 5.35 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
#!/usr/bin/env node
/**
* Full end-to-end test with model download
*
* Downloads all-MiniLM-L6-v2 and runs embedding tests
*/
import { ModelLoader, MODELS, DEFAULT_MODEL } from './loader.js';
import {
WasmEmbedder,
WasmEmbedderConfig,
cosineSimilarity,
} from './pkg/ruvector_onnx_embeddings_wasm.js';
console.log('🧪 RuVector ONNX Embeddings WASM - Full E2E Test\n');
console.log('='.repeat(60));

// Print every registered model, marking the default entry.
console.log('\n📦 Available Models:');
for (const m of ModelLoader.listModels()) {
  const isDefault = m.id === DEFAULT_MODEL ? ' ⭐ DEFAULT' : '';
  console.log(` • ${m.id} (${m.dimension}d, ${m.size})${isDefault}`);
  console.log(` ${m.description}`);
}

console.log('\n' + '='.repeat(60));
console.log(`\n🔄 Loading model: ${DEFAULT_MODEL}...\n`);
// Load model with progress reporting written in place on stdout.
// Cache is disabled so every run exercises the full download path.
const loader = new ModelLoader({
  cache: false, // Disable cache for testing
  onProgress: ({ loaded, total, percent }) => {
    // Guard against an unknown total size (e.g. a response without a
    // Content-Length header), which would otherwise render "NaNMB".
    const loadedMb = (loaded / 1024 / 1024).toFixed(1);
    const totalMb = Number.isFinite(total) && total > 0
      ? (total / 1024 / 1024).toFixed(1)
      : '?';
    process.stdout.write(`\r Progress: ${percent}% (${loadedMb}MB / ${totalMb}MB)`);
  }
});
// End-to-end exercise: load model → single embed → similarity pairs →
// batch embed → pairwise similarities → performance summary.
// Any failure aborts the process with a non-zero exit code.
try {
  // loadModel resolves with the raw model/tokenizer bytes plus the
  // model's config entry (name, maxLength, ...).
  const { modelBytes, tokenizerJson, config } = await loader.loadModel(DEFAULT_MODEL);
  console.log('\n');
  console.log(` ✅ Model loaded: ${config.name}`);
  console.log(` ✅ Model size: ${(modelBytes.length / 1024 / 1024).toFixed(2)} MB`);
  console.log(` ✅ Tokenizer size: ${(tokenizerJson.length / 1024).toFixed(2)} KB`);

  // Build the WASM embedder from the downloaded artifacts.
  console.log('\n🔧 Creating embedder...');
  const embedderConfig = new WasmEmbedderConfig()
    .setMaxLength(config.maxLength)
    .setNormalize(true)
    .setPooling(0); // pooling mode 0 — presumably mean pooling; confirm against WasmEmbedderConfig docs
  const embedder = WasmEmbedder.withConfig(modelBytes, tokenizerJson, embedderConfig);
  console.log(` ✅ Embedder created`);
  console.log(` ✅ Dimension: ${embedder.dimension()}`);
  console.log(` ✅ Max length: ${embedder.maxLength()}`);

  // Test 1: single text in, one vector of `dimension()` floats out.
  console.log('\n' + '='.repeat(60));
  console.log('\n📝 Test 1: Single Embedding');
  const text1 = "The quick brown fox jumps over the lazy dog.";
  console.log(` Input: "${text1}"`);
  const start1 = performance.now();
  const embedding1 = embedder.embedOne(text1);
  const time1 = performance.now() - start1;
  console.log(` ✅ Output dimension: ${embedding1.length}`);
  console.log(` ✅ First 5 values: [${Array.from(embedding1.slice(0, 5)).map(v => v.toFixed(4)).join(', ')}]`);
  console.log(` ✅ Time: ${time1.toFixed(2)}ms`);

  // Test 2: cosine similarity on sentence pairs. The 0.5 cut-off is an
  // informal display threshold for labeling, not a model property.
  console.log('\n' + '='.repeat(60));
  console.log('\n📝 Test 2: Semantic Similarity');
  const pairs = [
    ["I love programming in Rust", "Rust is my favorite programming language"],
    ["The weather is nice today", "It's sunny outside"],
    ["I love programming in Rust", "The weather is nice today"],
    ["Machine learning is fascinating", "AI and deep learning are interesting"],
  ];
  for (const [a, b] of pairs) {
    const start = performance.now();
    const sim = embedder.similarity(a, b);
    const time = performance.now() - start;
    const label = sim > 0.5 ? '🟢 Similar' : '🔴 Different';
    console.log(`\n "${a.substring(0, 30)}..."`);
    console.log(` "${b.substring(0, 30)}..."`);
    console.log(` ${label}: ${sim.toFixed(4)} (${time.toFixed(1)}ms)`);
  }

  // Test 3: batch embedding. embedBatch returns one flat array of
  // numTexts × dimension values (row-major), unpacked below by slicing.
  console.log('\n' + '='.repeat(60));
  console.log('\n📝 Test 3: Batch Embedding');
  const texts = [
    "Artificial intelligence is transforming technology.",
    "Machine learning models learn from data.",
    "Deep learning uses neural networks.",
    "Vector databases enable semantic search.",
  ];
  console.log(` Embedding ${texts.length} texts...`);
  const start3 = performance.now();
  const batchEmbeddings = embedder.embedBatch(texts);
  const time3 = performance.now() - start3;
  const embeddingDim = embedder.dimension();
  const numEmbeddings = batchEmbeddings.length / embeddingDim;
  console.log(` ✅ Total values: ${batchEmbeddings.length}`);
  console.log(` ✅ Embeddings: ${numEmbeddings} x ${embeddingDim}d`);
  console.log(` ✅ Time: ${time3.toFixed(2)}ms (${(time3/texts.length).toFixed(2)}ms per text)`);

  // Pairwise similarities over each unordered pair (i < j) from the
  // flat batch output.
  console.log('\n Pairwise similarities:');
  for (let i = 0; i < numEmbeddings; i++) {
    for (let j = i + 1; j < numEmbeddings; j++) {
      const emb_i = batchEmbeddings.slice(i * embeddingDim, (i + 1) * embeddingDim);
      const emb_j = batchEmbeddings.slice(j * embeddingDim, (j + 1) * embeddingDim);
      const sim = cosineSimilarity(emb_i, emb_j);
      console.log(` [${i}] vs [${j}]: ${sim.toFixed(4)}`);
    }
  }

  // Summary
  console.log('\n' + '='.repeat(60));
  console.log('\n✅ All tests passed!');
  console.log('='.repeat(60));
  console.log('\n📊 Performance Summary:');
  console.log(` • Model: ${config.name}`);
  console.log(` • Dimension: ${embeddingDim}`);
  console.log(` • Single embed: ~${time1.toFixed(0)}ms`);
  console.log(` • Batch (4 texts): ~${time3.toFixed(0)}ms`);
  console.log(` • Throughput: ~${(1000 / (time3/texts.length)).toFixed(0)} texts/sec`);
} catch (error) {
  // The loader / WASM bindings may throw non-Error values (strings,
  // plain objects), so don't assume `.message` or `.stack` exist —
  // the original printed "undefined" in that case.
  const message = error instanceof Error ? error.message : String(error);
  console.error('\n❌ Error:', message);
  if (error instanceof Error && error.stack) {
    console.error(error.stack);
  }
  process.exit(1);
}