From e31d0c2ecee5247761764a6090c3884efeefbe65 Mon Sep 17 00:00:00 2001 From: beatlevic Date: Thu, 6 Nov 2025 22:07:19 +0100 Subject: [PATCH 1/4] Added AutoCompleteLruCacheInMem --- .../core/autocomplete/CompletionProvider.ts | 6 +- .../util/AutoCompleteLruCacheInMem.test.ts | 184 ++++++++++++++++++ .../util/AutoCompleteLruCacheInMem.ts | 45 +++++ 3 files changed, 232 insertions(+), 3 deletions(-) create mode 100644 src/services/continuedev/core/autocomplete/util/AutoCompleteLruCacheInMem.test.ts create mode 100644 src/services/continuedev/core/autocomplete/util/AutoCompleteLruCacheInMem.ts diff --git a/src/services/continuedev/core/autocomplete/CompletionProvider.ts b/src/services/continuedev/core/autocomplete/CompletionProvider.ts index 12f8a60b89f..f1c2f549988 100644 --- a/src/services/continuedev/core/autocomplete/CompletionProvider.ts +++ b/src/services/continuedev/core/autocomplete/CompletionProvider.ts @@ -14,11 +14,11 @@ import { renderPromptWithTokenLimit } from "./templating/index.js" import { GetLspDefinitionsFunction } from "./types.js" import { AutocompleteDebouncer } from "./util/AutocompleteDebouncer.js" import { AutocompleteLoggingService } from "./util/AutocompleteLoggingService.js" -import { AutocompleteLruCache } from "./util/AutocompleteLruCache.js" +import { AutoCompleteLruCacheInMem } from "./util/AutoCompleteLruCacheInMem.js" import { HelperVars } from "./util/HelperVars.js" import { AutocompleteInput, AutocompleteOutcome } from "./util/types.js" -const autocompleteCache = AutocompleteLruCache.get() +const autocompleteCache = AutoCompleteLruCacheInMem.get() // Errors that can be expected on occasion even during normal functioning should not be shown. 
// Not worth disrupting the user to tell them that a single autocomplete request didn't go through @@ -29,7 +29,7 @@ const ERRORS_TO_IGNORE = [ ] export class CompletionProvider { - private autocompleteCache = AutocompleteLruCache.get() + private autocompleteCache = AutoCompleteLruCacheInMem.get() public errorsShown: Set = new Set() private bracketMatchingService = new BracketMatchingService() private debouncer = new AutocompleteDebouncer() diff --git a/src/services/continuedev/core/autocomplete/util/AutoCompleteLruCacheInMem.test.ts b/src/services/continuedev/core/autocomplete/util/AutoCompleteLruCacheInMem.test.ts new file mode 100644 index 00000000000..711f6132985 --- /dev/null +++ b/src/services/continuedev/core/autocomplete/util/AutoCompleteLruCacheInMem.test.ts @@ -0,0 +1,184 @@ +import { describe, it, expect, beforeEach } from "vitest" +import { AutoCompleteLruCacheInMem } from "./AutoCompleteLruCacheInMem" + +describe("AutoCompleteLruCacheInMem", () => { + let cache: AutoCompleteLruCacheInMem + + beforeEach(async () => { + cache = await AutoCompleteLruCacheInMem.get() + }) + + describe("basic operations", () => { + it("should store and retrieve a value", async () => { + await cache.put("hello", "world") + const result = await cache.get("hello") + expect(result).toBe("world") + }) + + it("should return undefined for non-existent key", async () => { + const result = await cache.get("nonexistent") + expect(result).toBeUndefined() + }) + + it("should update existing value", async () => { + await cache.put("key", "value1") + await cache.put("key", "value2") + const result = await cache.get("key") + expect(result).toBe("value2") + }) + }) + + describe("exact key matching", () => { + it("should match exact key and return value", async () => { + await cache.put("hello", "world") + const result = await cache.get("hello") + expect(result).toBe("world") + }) + + it("should return undefined when key doesn't match exactly", async () => { + await cache.put("hello", 
"world") + const result = await cache.get("goodbye") + expect(result).toBeUndefined() + }) + + it("should return undefined for partial key match", async () => { + await cache.put("hello", "world") + const result = await cache.get("hel") + expect(result).toBeUndefined() + }) + + it("should be case sensitive", async () => { + await cache.put("Hello", "World") + const result1 = await cache.get("Hello") + const result2 = await cache.get("hello") + expect(result1).toBe("World") + expect(result2).toBeUndefined() + }) + }) + + describe("LRU eviction", () => { + it("should evict oldest entry when capacity is reached", async () => { + // Create a fresh cache for this test + const testCache = await AutoCompleteLruCacheInMem.get() + + // Fill cache to capacity (1000 entries) + for (let i = 0; i < 1000; i++) { + await testCache.put(`key${i}`, `value${i}`) + } + + // Add one more entry to trigger eviction + await testCache.put("newkey", "newvalue") + + // First entry should be evicted (oldest timestamp) + const result = await testCache.get("key0") + expect(result).toBeUndefined() + + // New entry should exist + const newResult = await testCache.get("newkey") + expect(newResult).toBe("newvalue") + }) + + it("should update timestamp on cache hit", async () => { + // Create a fresh cache for this test + const testCache = await AutoCompleteLruCacheInMem.get() + + // Fill to capacity + for (let i = 0; i < 1000; i++) { + await testCache.put(`key${i}`, `value${i}`) + } + + // Access an early entry to refresh its timestamp + const refreshedValue = await testCache.get("key5") + expect(refreshedValue).toBe("value5") + + // Add new entries to trigger evictions + await testCache.put("new1", "newvalue1") + await testCache.put("new2", "newvalue2") + + // key5 should still exist (refreshed timestamp) + const key5Result = await testCache.get("key5") + expect(key5Result).toBe("value5") + + // key0 should be evicted (oldest timestamp, never accessed) + const key0Result = await 
testCache.get("key0") + expect(key0Result).toBeUndefined() + }) + }) + + describe("edge cases", () => { + it("should handle empty strings", async () => { + const testCache = await AutoCompleteLruCacheInMem.get() + await testCache.put("", "empty") + const result = await testCache.get("") + expect(result).toBe("empty") + }) + + it("should handle very long strings", async () => { + const testCache = await AutoCompleteLruCacheInMem.get() + const longString = "a".repeat(10000) + await testCache.put(longString, "completion") + const result = await testCache.get(longString) + expect(result).toBe("completion") + }) + + it("should handle special characters", async () => { + await cache.put("const x = {", "foo: 'bar'}") + const result = await cache.get("const x = {") + expect(result).toBe("foo: 'bar'}") + }) + + it("should handle unicode characters", async () => { + await cache.put("emoji 🚀", "rocket") + const result = await cache.get("emoji 🚀") + expect(result).toBe("rocket") + }) + }) + + describe("concurrent operations", () => { + it("should handle concurrent put operations", async () => { + const promises = [] + for (let i = 0; i < 10; i++) { + promises.push(cache.put(`concurrent${i}`, `value${i}`)) + } + await Promise.all(promises) + + // All values should be stored + for (let i = 0; i < 10; i++) { + const result = await cache.get(`concurrent${i}`) + expect(result).toBe(`value${i}`) + } + }) + + it("should handle concurrent get operations", async () => { + await cache.put("shared", "value") + + const promises = [] + for (let i = 0; i < 10; i++) { + promises.push(cache.get("shared")) + } + const results = await Promise.all(promises) + + // All gets should return the same value + results.forEach((result) => { + expect(result).toBe("value") + }) + }) + }) + + describe("multiple cache instances", () => { + it("should create separate cache instances", async () => { + const cache1 = await AutoCompleteLruCacheInMem.get() + const cache2 = await AutoCompleteLruCacheInMem.get() + 
+ await cache1.put("test", "value1") + await cache2.put("test", "value2") + + const result1 = await cache1.get("test") + const result2 = await cache2.get("test") + + // Each instance should have its own data + expect(result1).toBe("value1") + expect(result2).toBe("value2") + }) + }) +}) diff --git a/src/services/continuedev/core/autocomplete/util/AutoCompleteLruCacheInMem.ts b/src/services/continuedev/core/autocomplete/util/AutoCompleteLruCacheInMem.ts new file mode 100644 index 00000000000..d32c486feb0 --- /dev/null +++ b/src/services/continuedev/core/autocomplete/util/AutoCompleteLruCacheInMem.ts @@ -0,0 +1,45 @@ +import { LRUCache } from "lru-cache" + +const MAX_PREFIX_LENGTH = 50000 + +function truncatePrefix(input: string, safety: number = 100): string { + const maxBytes = MAX_PREFIX_LENGTH - safety + let bytes = 0 + let startIndex = 0 + + // Count bytes from the end, keeping the most recent typing + for (let i = input.length - 1; i >= 0; i--) { + bytes += new TextEncoder().encode(input[i]).length + if (bytes > maxBytes) { + startIndex = i + 1 + break + } + } + + return input.substring(startIndex) +} + +export class AutoCompleteLruCacheInMem { + private static capacity = 1000 + private cache: LRUCache + + private constructor() { + this.cache = new LRUCache({ + max: AutoCompleteLruCacheInMem.capacity, + }) + } + + static async get(): Promise { + return new AutoCompleteLruCacheInMem() + } + + async get(prefix: string): Promise { + const truncated = truncatePrefix(prefix) + return this.cache.get(truncated) + } + + async put(prefix: string, completion: string) { + const truncated = truncatePrefix(prefix) + this.cache.set(truncated, completion) + } +} From d602382056778b0dd689c69ddc1da792eb17e5ce Mon Sep 17 00:00:00 2001 From: beatlevic Date: Thu, 6 Nov 2025 23:00:18 +0100 Subject: [PATCH 2/4] Added some fuzzy matching to AutoCompleteLruCacheInMem --- .../util/AutoCompleteLruCacheInMem.test.ts | 63 +++++++++++++++++++ .../util/AutoCompleteLruCacheInMem.ts | 32 
+++++++++- 2 files changed, 94 insertions(+), 1 deletion(-) diff --git a/src/services/continuedev/core/autocomplete/util/AutoCompleteLruCacheInMem.test.ts b/src/services/continuedev/core/autocomplete/util/AutoCompleteLruCacheInMem.test.ts index 711f6132985..e0bf497d64e 100644 --- a/src/services/continuedev/core/autocomplete/util/AutoCompleteLruCacheInMem.test.ts +++ b/src/services/continuedev/core/autocomplete/util/AutoCompleteLruCacheInMem.test.ts @@ -56,6 +56,69 @@ describe("AutoCompleteLruCacheInMem", () => { }) }) + describe("fuzzy matching", () => { + it("should return completion when prefix extends a cached key", async () => { + // Cache "c" -> "ontinue" + await cache.put("c", "ontinue") + // Query "co" should return "ntinue" (completion minus what we already have) + const result = await cache.get("co") + expect(result).toBe("ntinue") + }) + + it("should prefer longest matching key", async () => { + // Cache multiple overlapping keys + await cache.put("h", "ello world") + await cache.put("he", "llo world") + await cache.put("hel", "lo world") + + // Query "hello" should match "hel" (longest key) + // User typed "hello" = "hel" + "lo", cached completion is "lo world" + // So return " world" (the part not yet typed) + const result = await cache.get("hello") + expect(result).toBe(" world") + }) + + it("should validate cached completion starts correctly", async () => { + // Cache "c" -> "ontinue" + await cache.put("c", "ontinue") + // Query "cx" doesn't match the completion pattern, should return undefined + const result = await cache.get("cx") + expect(result).toBeUndefined() + }) + + it("should return exact match if available", async () => { + // Cache both exact and partial keys + await cache.put("co", "mplete") + await cache.put("c", "ontinue") + + // Exact match should be preferred + const result = await cache.get("co") + expect(result).toBe("mplete") + }) + + it("should handle multiple partial matches correctly", async () => { + // Cache overlapping 
prefixes + await cache.put("fun", "ction") + await cache.put("f", "unction") + + // Query "func" should match "fun" (longest) and return "tion" + const result = await cache.get("func") + expect(result).toBe("tion") + }) + + it("should return undefined when no fuzzy match exists", async () => { + await cache.put("hello", "world") + // "goodbye" doesn't start with "hello" + const result = await cache.get("goodbye") + expect(result).toBeUndefined() + }) + + it("should handle empty cache for fuzzy matching", async () => { + const result = await cache.get("anyprefix") + expect(result).toBeUndefined() + }) + }) + describe("LRU eviction", () => { it("should evict oldest entry when capacity is reached", async () => { // Create a fresh cache for this test diff --git a/src/services/continuedev/core/autocomplete/util/AutoCompleteLruCacheInMem.ts b/src/services/continuedev/core/autocomplete/util/AutoCompleteLruCacheInMem.ts index d32c486feb0..693d39444da 100644 --- a/src/services/continuedev/core/autocomplete/util/AutoCompleteLruCacheInMem.ts +++ b/src/services/continuedev/core/autocomplete/util/AutoCompleteLruCacheInMem.ts @@ -35,7 +35,37 @@ export class AutoCompleteLruCacheInMem { async get(prefix: string): Promise { const truncated = truncatePrefix(prefix) - return this.cache.get(truncated) + + // First try exact match (faster) + const exactMatch = this.cache.get(truncated) + if (exactMatch !== undefined) { + return exactMatch + } + + // Then try fuzzy matching - find keys where prefix starts with the key + // If the query is "co" and we have "c" -> "ontinue" in the cache, + // we should return "ntinue" as the completion. 
+ // Have to make sure we take the key with shortest length + let bestMatch: { key: string; value: string } | null = null + let longestKeyLength = 0 + + for (const [key, value] of this.cache.entries()) { + // Check if truncated prefix starts with this key + if (truncated.startsWith(key) && key.length > longestKeyLength) { + bestMatch = { key, value } + longestKeyLength = key.length + } + } + + if (bestMatch) { + // Validate that the cached completion is a valid completion for the prefix + if (bestMatch.value.startsWith(truncated.slice(bestMatch.key.length))) { + // Return the portion of the value that extends beyond the current prefix + return bestMatch.value.slice(truncated.length - bestMatch.key.length) + } + } + + return undefined } async put(prefix: string, completion: string) { From 614b69ae0f7f9b4f3676f846779f846462790d6a Mon Sep 17 00:00:00 2001 From: beatlevic Date: Thu, 6 Nov 2025 23:05:32 +0100 Subject: [PATCH 3/4] Small changes to AutoCompleteLruCacheInMem to be in line with AutoCompleteLruCache --- .../core/autocomplete/util/AutoCompleteLruCacheInMem.ts | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/services/continuedev/core/autocomplete/util/AutoCompleteLruCacheInMem.ts b/src/services/continuedev/core/autocomplete/util/AutoCompleteLruCacheInMem.ts index 693d39444da..2134653a0f6 100644 --- a/src/services/continuedev/core/autocomplete/util/AutoCompleteLruCacheInMem.ts +++ b/src/services/continuedev/core/autocomplete/util/AutoCompleteLruCacheInMem.ts @@ -45,7 +45,7 @@ export class AutoCompleteLruCacheInMem { // Then try fuzzy matching - find keys where prefix starts with the key // If the query is "co" and we have "c" -> "ontinue" in the cache, // we should return "ntinue" as the completion. 
- // Have to make sure we take the key with shortest length + // Have to make sure we take the key with longest length for best match let bestMatch: { key: string; value: string } | null = null let longestKeyLength = 0 @@ -60,6 +60,8 @@ export class AutoCompleteLruCacheInMem { if (bestMatch) { // Validate that the cached completion is a valid completion for the prefix if (bestMatch.value.startsWith(truncated.slice(bestMatch.key.length))) { + // Update LRU timestamp for the matched key by accessing it + this.cache.get(bestMatch.key) // Return the portion of the value that extends beyond the current prefix return bestMatch.value.slice(truncated.length - bestMatch.key.length) } From 420b416add60a531cc5b2b6681587a9090ae70f6 Mon Sep 17 00:00:00 2001 From: beatlevic Date: Fri, 7 Nov 2025 20:40:58 +0100 Subject: [PATCH 4/4] Use only instance-level private autocompleteCache --- .../continuedev/core/autocomplete/CompletionProvider.ts | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/services/continuedev/core/autocomplete/CompletionProvider.ts b/src/services/continuedev/core/autocomplete/CompletionProvider.ts index f1c2f549988..0ef6f220e19 100644 --- a/src/services/continuedev/core/autocomplete/CompletionProvider.ts +++ b/src/services/continuedev/core/autocomplete/CompletionProvider.ts @@ -18,7 +18,8 @@ import { AutoCompleteLruCacheInMem } from "./util/AutoCompleteLruCacheInMem.js" import { HelperVars } from "./util/HelperVars.js" import { AutocompleteInput, AutocompleteOutcome } from "./util/types.js" -const autocompleteCache = AutoCompleteLruCacheInMem.get() +// Sqlite AutoCompleteLruCache also has constant initialization +// const autocompleteCache = AutoCompleteLruCache.get() // Errors that can be expected on occasion even during normal functioning should not be shown. 
// Not worth disrupting the user to tell them that a single autocomplete request didn't go through @@ -184,7 +185,7 @@ export class CompletionProvider { // Completion let completion: string | undefined = "" - const cache = await autocompleteCache + const cache = await this.autocompleteCache const cachedCompletion = helper.options.useCache ? await cache.get(helper.prunedPrefix) : undefined let cacheHit = false if (cachedCompletion) {