Skip to content

Commit ec0a03d

Browse files
committed
fix: use autoCondenseContext checkbox instead of 100% threshold check
The issue was that the code was checking for a 100% threshold value to determine whether condensing was disabled, when it should have been using the existing "Automatically trigger intelligent context condensing" checkbox (the `autoCondenseContext` boolean).

Changes:
- When `autoCondenseContext` is false, no truncation or condensing occurs
- Removed the special case for the 100% threshold
- Updated all related tests to reflect the correct behavior
1 parent 848fd82 commit ec0a03d

File tree

2 files changed

+51
-69
lines changed

2 files changed

+51
-69
lines changed

src/core/sliding-window/__tests__/sliding-window.spec.ts

Lines changed: 46 additions & 63 deletions
Original file line numberDiff line numberDiff line change
@@ -284,7 +284,7 @@ describe("Sliding Window", () => {
284284
})
285285
})
286286

287-
it("should truncate if tokens are above max tokens threshold", async () => {
287+
it("should not truncate if tokens are above max tokens threshold when autoCondenseContext is false", async () => {
288288
const modelInfo = createModelInfo(100000, 30000)
289289
const totalTokens = 70001 // Above threshold
290290

@@ -294,30 +294,23 @@ describe("Sliding Window", () => {
294294
{ ...messages[messages.length - 1], content: "" },
295295
]
296296

297-
// When truncating, always uses 0.5 fraction
298-
// With 4 messages after the first, 0.5 fraction means remove 2 messages
299-
const expectedMessages = [
300-
messagesWithSmallContent[0],
301-
messagesWithSmallContent[3],
302-
messagesWithSmallContent[4],
303-
]
304-
305297
const result = await truncateConversationIfNeeded({
306298
messages: messagesWithSmallContent,
307299
totalTokens,
308300
contextWindow: modelInfo.contextWindow,
309301
maxTokens: modelInfo.maxTokens,
310302
apiHandler: mockApiHandler,
311-
autoCondenseContext: false,
303+
autoCondenseContext: false, // Disabled - should not truncate
312304
autoCondenseContextPercent: 100,
313305
systemPrompt: "System prompt",
314306
taskId,
315307
profileThresholds: {},
316308
currentProfileId: "default",
317309
})
318310

311+
// When autoCondenseContext is false, should NOT truncate
319312
expect(result).toEqual({
320-
messages: expectedMessages,
313+
messages: messagesWithSmallContent, // Original messages preserved
321314
summary: "",
322315
cost: 0,
323316
prevContextTokens: totalTokens,
@@ -343,7 +336,7 @@ describe("Sliding Window", () => {
343336
contextWindow: modelInfo1.contextWindow,
344337
maxTokens: modelInfo1.maxTokens,
345338
apiHandler: mockApiHandler,
346-
autoCondenseContext: false,
339+
autoCondenseContext: false, // Disabled - no truncation
347340
autoCondenseContextPercent: 100,
348341
systemPrompt: "System prompt",
349342
taskId,
@@ -357,7 +350,7 @@ describe("Sliding Window", () => {
357350
contextWindow: modelInfo2.contextWindow,
358351
maxTokens: modelInfo2.maxTokens,
359352
apiHandler: mockApiHandler,
360-
autoCondenseContext: false,
353+
autoCondenseContext: false, // Disabled - no truncation
361354
autoCondenseContextPercent: 100,
362355
systemPrompt: "System prompt",
363356
taskId,
@@ -370,15 +363,15 @@ describe("Sliding Window", () => {
370363
expect(result1.cost).toEqual(result2.cost)
371364
expect(result1.prevContextTokens).toEqual(result2.prevContextTokens)
372365

373-
// Test above threshold
366+
// Test above threshold - with autoCondenseContext false, should not truncate
374367
const aboveThreshold = 70001
375368
const result3 = await truncateConversationIfNeeded({
376369
messages: messagesWithSmallContent,
377370
totalTokens: aboveThreshold,
378371
contextWindow: modelInfo1.contextWindow,
379372
maxTokens: modelInfo1.maxTokens,
380373
apiHandler: mockApiHandler,
381-
autoCondenseContext: false,
374+
autoCondenseContext: false, // Disabled - no truncation
382375
autoCondenseContextPercent: 100,
383376
systemPrompt: "System prompt",
384377
taskId,
@@ -392,14 +385,17 @@ describe("Sliding Window", () => {
392385
contextWindow: modelInfo2.contextWindow,
393386
maxTokens: modelInfo2.maxTokens,
394387
apiHandler: mockApiHandler,
395-
autoCondenseContext: false,
388+
autoCondenseContext: false, // Disabled - no truncation
396389
autoCondenseContextPercent: 100,
397390
systemPrompt: "System prompt",
398391
taskId,
399392
profileThresholds: {},
400393
currentProfileId: "default",
401394
})
402395

396+
// Both should preserve original messages when autoCondenseContext is false
397+
expect(result3.messages).toEqual(messagesWithSmallContent)
398+
expect(result4.messages).toEqual(messagesWithSmallContent)
403399
expect(result3.messages).toEqual(result4.messages)
404400
expect(result3.summary).toEqual(result4.summary)
405401
expect(result3.cost).toEqual(result4.cost)
@@ -463,14 +459,15 @@ describe("Sliding Window", () => {
463459
contextWindow: modelInfo.contextWindow,
464460
maxTokens,
465461
apiHandler: mockApiHandler,
466-
autoCondenseContext: false,
462+
autoCondenseContext: false, // Disabled - no truncation
467463
autoCondenseContextPercent: 100,
468464
systemPrompt: "System prompt",
469465
taskId,
470466
profileThresholds: {},
471467
currentProfileId: "default",
472468
})
473-
expect(resultWithLarge.messages).not.toEqual(messagesWithLargeContent) // Should truncate
469+
// When autoCondenseContext is false, should NOT truncate
470+
expect(resultWithLarge.messages).toEqual(messagesWithLargeContent) // Should NOT truncate
474471
expect(resultWithLarge.summary).toBe("")
475472
expect(resultWithLarge.cost).toBe(0)
476473
expect(resultWithLarge.prevContextTokens).toBe(baseTokensForLarge + largeContentTokens)
@@ -491,20 +488,21 @@ describe("Sliding Window", () => {
491488
contextWindow: modelInfo.contextWindow,
492489
maxTokens,
493490
apiHandler: mockApiHandler,
494-
autoCondenseContext: false,
491+
autoCondenseContext: false, // Disabled - no truncation
495492
autoCondenseContextPercent: 100,
496493
systemPrompt: "System prompt",
497494
taskId,
498495
profileThresholds: {},
499496
currentProfileId: "default",
500497
})
501-
expect(resultWithVeryLarge.messages).not.toEqual(messagesWithVeryLargeContent) // Should truncate
498+
// When autoCondenseContext is false, should NOT truncate
499+
expect(resultWithVeryLarge.messages).toEqual(messagesWithVeryLargeContent) // Should NOT truncate
502500
expect(resultWithVeryLarge.summary).toBe("")
503501
expect(resultWithVeryLarge.cost).toBe(0)
504502
expect(resultWithVeryLarge.prevContextTokens).toBe(baseTokensForVeryLarge + veryLargeContentTokens)
505503
})
506504

507-
it("should truncate if tokens are within TOKEN_BUFFER_PERCENTAGE of the threshold", async () => {
505+
it("should not truncate if tokens are within TOKEN_BUFFER_PERCENTAGE when autoCondenseContext is false", async () => {
508506
const modelInfo = createModelInfo(100000, 30000)
509507
const dynamicBuffer = modelInfo.contextWindow * TOKEN_BUFFER_PERCENTAGE // 10% of 100000 = 10000
510508
const totalTokens = 70000 - dynamicBuffer + 1 // Just within the dynamic buffer of threshold (70000)
@@ -515,29 +513,22 @@ describe("Sliding Window", () => {
515513
{ ...messages[messages.length - 1], content: "" },
516514
]
517515

518-
// When truncating, always uses 0.5 fraction
519-
// With 4 messages after the first, 0.5 fraction means remove 2 messages
520-
const expectedResult = [
521-
messagesWithSmallContent[0],
522-
messagesWithSmallContent[3],
523-
messagesWithSmallContent[4],
524-
]
525-
526516
const result = await truncateConversationIfNeeded({
527517
messages: messagesWithSmallContent,
528518
totalTokens,
529519
contextWindow: modelInfo.contextWindow,
530520
maxTokens: modelInfo.maxTokens,
531521
apiHandler: mockApiHandler,
532-
autoCondenseContext: false,
522+
autoCondenseContext: false, // Disabled - no truncation
533523
autoCondenseContextPercent: 100,
534524
systemPrompt: "System prompt",
535525
taskId,
536526
profileThresholds: {},
537527
currentProfileId: "default",
538528
})
529+
// When autoCondenseContext is false, should NOT truncate
539530
expect(result).toEqual({
540-
messages: expectedResult,
531+
messages: messagesWithSmallContent, // Original messages preserved
541532
summary: "",
542533
cost: 0,
543534
prevContextTokens: totalTokens,
@@ -676,21 +667,13 @@ describe("Sliding Window", () => {
676667
{ ...messages[messages.length - 1], content: "" },
677668
]
678669

679-
// When truncating, always uses 0.5 fraction
680-
// With 4 messages after the first, 0.5 fraction means remove 2 messages
681-
const expectedMessages = [
682-
messagesWithSmallContent[0],
683-
messagesWithSmallContent[3],
684-
messagesWithSmallContent[4],
685-
]
686-
687670
const result = await truncateConversationIfNeeded({
688671
messages: messagesWithSmallContent,
689672
totalTokens,
690673
contextWindow: modelInfo.contextWindow,
691674
maxTokens: modelInfo.maxTokens,
692675
apiHandler: mockApiHandler,
693-
autoCondenseContext: false,
676+
autoCondenseContext: false, // Disabled - should not truncate
694677
autoCondenseContextPercent: 50, // This shouldn't matter since autoCondenseContext is false
695678
systemPrompt: "System prompt",
696679
taskId,
@@ -701,9 +684,9 @@ describe("Sliding Window", () => {
701684
// Verify summarizeConversation was not called
702685
expect(summarizeSpy).not.toHaveBeenCalled()
703686

704-
// Verify it used truncation
687+
// When autoCondenseContext is false, should NOT truncate even if above threshold
705688
expect(result).toEqual({
706-
messages: expectedMessages,
689+
messages: messagesWithSmallContent, // Original messages preserved
707690
summary: "",
708691
cost: 0,
709692
prevContextTokens: totalTokens,
@@ -822,7 +805,7 @@ describe("Sliding Window", () => {
822805
summarizeSpy.mockRestore()
823806
})
824807

825-
it("should not truncate when autoCondenseContext is true and threshold is 100% even if tokens exceed allowedTokens", async () => {
808+
it("should not truncate when autoCondenseContext is false even if tokens exceed allowedTokens", async () => {
826809
const modelInfo = createModelInfo(100000, 30000)
827810
const totalTokens = 75000 // This exceeds allowedTokens (60000) but should not truncate when disabled
828811

@@ -840,18 +823,18 @@ describe("Sliding Window", () => {
840823
contextWindow: modelInfo.contextWindow,
841824
maxTokens: modelInfo.maxTokens,
842825
apiHandler: mockApiHandler,
843-
autoCondenseContext: true, // Enabled but with 100% threshold
844-
autoCondenseContextPercent: 100, // 100% threshold means never condense
826+
autoCondenseContext: false, // Disabled - should not truncate
827+
autoCondenseContextPercent: 50, // This shouldn't matter since autoCondenseContext is false
845828
systemPrompt: "System prompt",
846829
taskId,
847830
profileThresholds: {},
848831
currentProfileId: "default",
849832
})
850833

851-
// Verify summarizeConversation was NOT called when threshold is 100%
834+
// Verify summarizeConversation was NOT called
852835
expect(summarizeSpy).not.toHaveBeenCalled()
853836

854-
// Should NOT truncate even though tokens exceed allowedTokens when threshold is 100%
837+
// Should NOT truncate even though tokens exceed allowedTokens when autoCondenseContext is false
855838
expect(result).toEqual({
856839
messages: messagesWithSmallContent,
857840
summary: "",
@@ -1123,22 +1106,22 @@ describe("Sliding Window", () => {
11231106
prevContextTokens: 39999,
11241107
})
11251108

1126-
// Above max tokens - truncate
1109+
// Above max tokens - but with autoCondenseContext false, should not truncate
11271110
const result2 = await truncateConversationIfNeeded({
11281111
messages: messagesWithSmallContent,
11291112
totalTokens: 50001, // Above threshold
11301113
contextWindow: modelInfo.contextWindow,
11311114
maxTokens: modelInfo.maxTokens,
11321115
apiHandler: mockApiHandler,
1133-
autoCondenseContext: false,
1116+
autoCondenseContext: false, // Disabled - no truncation
11341117
autoCondenseContextPercent: 100,
11351118
systemPrompt: "System prompt",
11361119
taskId,
11371120
profileThresholds: {},
11381121
currentProfileId: "default",
11391122
})
1140-
expect(result2.messages).not.toEqual(messagesWithSmallContent)
1141-
expect(result2.messages.length).toBe(3) // Truncated with 0.5 fraction
1123+
// When autoCondenseContext is false, should NOT truncate
1124+
expect(result2.messages).toEqual(messagesWithSmallContent)
11421125
expect(result2.summary).toBe("")
11431126
expect(result2.cost).toBe(0)
11441127
expect(result2.prevContextTokens).toBe(50001)
@@ -1176,22 +1159,22 @@ describe("Sliding Window", () => {
11761159
prevContextTokens: 81807,
11771160
})
11781161

1179-
// Above max tokens - truncate
1162+
// Above max tokens - but with autoCondenseContext false, should not truncate
11801163
const result2 = await truncateConversationIfNeeded({
11811164
messages: messagesWithSmallContent,
11821165
totalTokens: 81809, // Above threshold (81808)
11831166
contextWindow: modelInfo.contextWindow,
11841167
maxTokens: modelInfo.maxTokens,
11851168
apiHandler: mockApiHandler,
1186-
autoCondenseContext: false,
1169+
autoCondenseContext: false, // Disabled - no truncation
11871170
autoCondenseContextPercent: 100,
11881171
systemPrompt: "System prompt",
11891172
taskId,
11901173
profileThresholds: {},
11911174
currentProfileId: "default",
11921175
})
1193-
expect(result2.messages).not.toEqual(messagesWithSmallContent)
1194-
expect(result2.messages.length).toBe(3) // Truncated with 0.5 fraction
1176+
// When autoCondenseContext is false, should NOT truncate
1177+
expect(result2.messages).toEqual(messagesWithSmallContent)
11951178
expect(result2.summary).toBe("")
11961179
expect(result2.cost).toBe(0)
11971180
expect(result2.prevContextTokens).toBe(81809)
@@ -1223,22 +1206,22 @@ describe("Sliding Window", () => {
12231206
})
12241207
expect(result1.messages).toEqual(messagesWithSmallContent)
12251208

1226-
// Above max tokens - truncate
1209+
// Above max tokens - but with autoCondenseContext false, should not truncate
12271210
const result2 = await truncateConversationIfNeeded({
12281211
messages: messagesWithSmallContent,
12291212
totalTokens: 40001, // Above threshold
12301213
contextWindow: modelInfo.contextWindow,
12311214
maxTokens: modelInfo.maxTokens,
12321215
apiHandler: mockApiHandler,
1233-
autoCondenseContext: false,
1216+
autoCondenseContext: false, // Disabled - no truncation
12341217
autoCondenseContextPercent: 100,
12351218
systemPrompt: "System prompt",
12361219
taskId,
12371220
profileThresholds: {},
12381221
currentProfileId: "default",
12391222
})
1240-
expect(result2).not.toEqual(messagesWithSmallContent)
1241-
expect(result2.messages.length).toBe(3) // Truncated with 0.5 fraction
1223+
// When autoCondenseContext is false, should NOT truncate
1224+
expect(result2.messages).toEqual(messagesWithSmallContent)
12421225
})
12431226

12441227
it("should handle large context windows appropriately", async () => {
@@ -1268,22 +1251,22 @@ describe("Sliding Window", () => {
12681251
})
12691252
expect(result1.messages).toEqual(messagesWithSmallContent)
12701253

1271-
// Above max tokens - truncate
1254+
// Above max tokens - but with autoCondenseContext false, should not truncate
12721255
const result2 = await truncateConversationIfNeeded({
12731256
messages: messagesWithSmallContent,
12741257
totalTokens: 170001, // Above threshold
12751258
contextWindow: modelInfo.contextWindow,
12761259
maxTokens: modelInfo.maxTokens,
12771260
apiHandler: mockApiHandler,
1278-
autoCondenseContext: false,
1261+
autoCondenseContext: false, // Disabled - no truncation
12791262
autoCondenseContextPercent: 100,
12801263
systemPrompt: "System prompt",
12811264
taskId,
12821265
profileThresholds: {},
12831266
currentProfileId: "default",
12841267
})
1285-
expect(result2).not.toEqual(messagesWithSmallContent)
1286-
expect(result2.messages.length).toBe(3) // Truncated with 0.5 fraction
1268+
// When autoCondenseContext is false, should NOT truncate
1269+
expect(result2.messages).toEqual(messagesWithSmallContent)
12871270
})
12881271
})
12891272
})

src/core/sliding-window/index.ts

Lines changed: 5 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -142,6 +142,7 @@ export async function truncateConversationIfNeeded({
142142
}
143143
// If no specific threshold is found for the profile, fall back to global setting
144144

145+
// Only apply condensing if autoCondenseContext is enabled
145146
if (autoCondenseContext && effectiveThreshold < 100) {
146147
const contextPercent = (100 * prevContextTokens) / contextWindow
147148
if (contextPercent >= effectiveThreshold || prevContextTokens > allowedTokens) {
@@ -166,15 +167,13 @@ export async function truncateConversationIfNeeded({
166167
}
167168

168169
// Fall back to sliding window truncation if needed
169-
// Exception: When context condensing is explicitly disabled (threshold = 100%), don't truncate
170+
// When autoCondenseContext is false, don't truncate - user has explicitly disabled context management
170171
if (prevContextTokens > allowedTokens) {
171-
// Check if condensing is explicitly disabled (threshold is 100% and autoCondenseContext is true)
172-
// This means the user has set the threshold to 100% to disable condensing
173-
if (autoCondenseContext && effectiveThreshold >= 100) {
174-
// Context condensing is explicitly disabled by user, don't truncate
172+
if (!autoCondenseContext) {
173+
// Context condensing is disabled by the checkbox, don't truncate
175174
return { messages, summary: "", cost, prevContextTokens, error }
176175
}
177-
// Apply sliding window truncation in all other cases
176+
// Apply sliding window truncation only when condensing is enabled but failed or threshold not reached
178177
const truncatedMessages = truncateConversation(messages, 0.5, taskId)
179178
return { messages: truncatedMessages, prevContextTokens, summary: "", cost, error }
180179
}

0 commit comments

Comments (0)