#!/usr/bin/env node

// npx tsx scripts/benchmark-task-messages.ts

import * as fs from "fs/promises"
import * as path from "path"
import { performance } from "perf_hooks"

// Import only the type
import type { ClineMessage } from "../src/shared/ExtensionMessage"

// Constants
const BENCHMARK_DIR = path.join(process.cwd(), "benchmark-test-storage")
const TASK_ID = "benchmark-test-task"
const TASK_DIR = path.join(BENCHMARK_DIR, TASK_ID)

// File paths for both implementations
const JSON_FILE_PATH = path.join(TASK_DIR, "messages.json")
const JSONL_FILE_PATH = path.join(TASK_DIR, "messages.jsonl")

// Function to create a sample message with much longer text
const createSampleMessage = (): ClineMessage => {
	// Generate a long text message to better simulate real-world data
	const longText = `This is a much longer test message that simulates a real-world conversation with an AI assistant.
It contains multiple paragraphs and a significant amount of text to better demonstrate the performance differences
between JSON and JSONL formats when dealing with larger message sizes.

When working with large datasets or conversation histories, the efficiency of storage and retrieval becomes increasingly
important. This benchmark helps quantify those differences by measuring the time it takes to append messages using
both approaches.

The JSON approach requires reading the entire file, parsing it into memory, appending the new message, and then
writing the entire content back to disk. This becomes increasingly expensive as the file grows larger.

The JSONL approach, on the other hand, simply appends the new message to the end of the file without needing to
read or parse existing content. This should theoretically provide better performance, especially as the number
of messages increases.

This benchmark will help us determine at what point the performance difference becomes significant and whether
the JSONL approach provides meaningful benefits for our specific use case in the VS Code extension.`

	return {
		ts: Date.now(),
		type: "say",
		say: "text",
		text: longText,
	}
}

// Function to create a directory if it doesn't exist
async function ensureDirectoryExists(dirPath: string): Promise<void> {
	try {
		await fs.mkdir(dirPath, { recursive: true })
	} catch (error) {
		console.error(`Error creating directory ${dirPath}:`, error)
		throw error
	}
}

// Function to create test files with a specified number of messages
async function createTestFiles(messageCount: number): Promise<void> {
	console.log(`Creating test files with ${messageCount} messages...`)

	// Create JSON test file
	const jsonMessages: ClineMessage[] = []
	for (let i = 0; i < messageCount; i++) {
		jsonMessages.push(createSampleMessage())
	}
	await fs.writeFile(JSON_FILE_PATH, JSON.stringify(jsonMessages))

	// Create JSONL test file (end with a newline so later appends start on a fresh line)
	const jsonlContent = jsonMessages.map((msg) => JSON.stringify(msg)).join("\n")
	await fs.writeFile(JSONL_FILE_PATH, jsonlContent.length > 0 ? jsonlContent + "\n" : "")

	console.log("Test files created successfully.")
}

// Simplified implementation of saveTaskMessages
async function saveTaskMessages({
	messages,
	taskId,
	globalStoragePath,
}: {
	messages: ClineMessage[]
	taskId: string
	globalStoragePath: string
}): Promise<void> {
	// For the benchmark, we write directly to the specified file
	const filePath = path.join(globalStoragePath, "messages.json")
	await fs.writeFile(filePath, JSON.stringify(messages))
}

// Simplified implementation of appendTaskMessage
async function appendTaskMessage({
	message,
	taskId,
	globalStoragePath,
}: {
	message: ClineMessage
	taskId: string
	globalStoragePath: string
}): Promise<void> {
	// For the benchmark, we append directly to the specified file
	const filePath = path.join(globalStoragePath, "messages.jsonl")
	await fs.appendFile(filePath, JSON.stringify(message) + "\n")
}
// Function to benchmark JSON implementation
async function benchmarkJSON(iterations: number): Promise<number[]> {
	const durations: number[] = []
	// Start from the messages already in the pre-created test file so that rewriting
	// the full array reflects the configured message count, not just the new messages.
	const messages: ClineMessage[] = JSON.parse(await fs.readFile(JSON_FILE_PATH, "utf8"))

	for (let i = 0; i < iterations; i++) {
		const newMessage = createSampleMessage()

		// Benchmark saveTaskMessages
		const start = performance.now()
		messages.push(newMessage)
		await saveTaskMessages({ messages, taskId: TASK_ID, globalStoragePath: TASK_DIR })
		const end = performance.now()

		durations.push(end - start)
	}

	return durations
}

// Function to benchmark JSONL implementation
async function benchmarkJSONL(iterations: number): Promise<number[]> {
	const durations: number[] = []

	for (let i = 0; i < iterations; i++) {
		const newMessage = createSampleMessage()

		// Benchmark appendTaskMessage
		const start = performance.now()
		await appendTaskMessage({ message: newMessage, taskId: TASK_ID, globalStoragePath: TASK_DIR })
		const end = performance.now()

		durations.push(end - start)
	}

	return durations
}

// Function to calculate statistics
function calculateStats(durations: number[]): { min: number; max: number; avg: number; median: number } {
	const sorted = [...durations].sort((a, b) => a - b)
	return {
		min: sorted[0],
		max: sorted[sorted.length - 1],
		avg: durations.reduce((sum, val) => sum + val, 0) / durations.length,
		median: sorted[Math.floor(sorted.length / 2)],
	}
}

// Main benchmark function
async function runBenchmark(): Promise<void> {
	try {
		// Ensure benchmark directory exists
		await ensureDirectoryExists(TASK_DIR)

		// Define message counts to test
		const messageCounts = [10, 100, 1000, 10000, 50000]
		// Number of iterations for each test
		const iterations = 10

		// Add a sequential append test
		async function runSequentialTest() {
			console.log("\nRunning Sequential Append Test (100 messages in sequence)...")
			console.log("This test simulates a more realistic scenario where messages are added over time")

			// Create empty files
			await fs.writeFile(JSON_FILE_PATH, JSON.stringify([]))
			await fs.writeFile(JSONL_FILE_PATH, "")

			// Test JSON sequential append
			const jsonStart = performance.now()
			let jsonMessages: ClineMessage[] = []

			for (let i = 0; i < 100; i++) {
				// For JSON, we need to read the entire file each time
				jsonMessages = JSON.parse(await fs.readFile(JSON_FILE_PATH, "utf8"))
				jsonMessages.push(createSampleMessage())
				await fs.writeFile(JSON_FILE_PATH, JSON.stringify(jsonMessages))
			}

			const jsonEnd = performance.now()
			const jsonDuration = jsonEnd - jsonStart

			// Test JSONL sequential append
			const jsonlStart = performance.now()

			for (let i = 0; i < 100; i++) {
				// For JSONL, we just append
				await fs.appendFile(JSONL_FILE_PATH, JSON.stringify(createSampleMessage()) + "\n")
			}

			const jsonlEnd = performance.now()
			const jsonlDuration = jsonlEnd - jsonlStart

			// Calculate speedup
			const sequentialSpeedup = jsonDuration / jsonlDuration

			console.log(`JSON sequential append time: ${jsonDuration.toFixed(2)} ms`)
			console.log(`JSONL sequential append time: ${jsonlDuration.toFixed(2)} ms`)
			console.log(`Sequential append speedup: ${sequentialSpeedup.toFixed(2)}x`)
		}

		console.log("Starting benchmark...")
		console.log("=============================================")
		console.log("| Message Count | Implementation | Min (ms) | Max (ms) | Avg (ms) | Median (ms) |")
		console.log("|---------------|---------------|----------|----------|----------|-------------|")

		for (const count of messageCounts) {
			// Create test files with the specified number of messages
			await createTestFiles(count)

			// Benchmark JSON implementation
			const jsonDurations = await benchmarkJSON(iterations)
			const jsonStats = calculateStats(jsonDurations)

			// Reset the files to ensure consistent state
			await createTestFiles(count)

			// Benchmark JSONL implementation
			const jsonlDurations = await benchmarkJSONL(iterations)
			const jsonlStats = calculateStats(jsonlDurations)

			// Print results
			console.log(
				`| ${count.toString().padEnd(13)} | JSON | ${jsonStats.min.toFixed(2).padEnd(8)} | ${jsonStats.max.toFixed(2).padEnd(8)} | ${jsonStats.avg.toFixed(2).padEnd(8)} | ${jsonStats.median.toFixed(2).padEnd(11)} |`,
			)
			console.log(
				`| ${" ".padEnd(13)} | JSONL | ${jsonlStats.min.toFixed(2).padEnd(8)} | ${jsonlStats.max.toFixed(2).padEnd(8)} | ${jsonlStats.avg.toFixed(2).padEnd(8)} | ${jsonlStats.median.toFixed(2).padEnd(11)} |`,
			)

			// Calculate and print speedup
			const avgSpeedup = jsonStats.avg / jsonlStats.avg
			console.log(`| ${" ".padEnd(13)} | Speedup | ${avgSpeedup.toFixed(2)}x ${" ".repeat(37)} |`)
			console.log("|---------------|---------------|----------|----------|----------|-------------|")
		}

		console.log("Benchmark completed!")

		// Run the sequential test
		await runSequentialTest()
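
		// Optional cleanup: remove the generated test storage so repeated runs start fresh.
		// Comment this out if you want to inspect the benchmark files afterwards.
		await fs.rm(BENCHMARK_DIR, { recursive: true, force: true })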
	} catch (error) {
		console.error("Error running benchmark:", error)
	}
}

// Run the benchmark
runBenchmark()