Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
15 changes: 15 additions & 0 deletions webview-ui/src/__mocks__/components/chat/TaskHeader.tsx
Original file line number Diff line number Diff line change
@@ -0,0 +1,15 @@
import React from "react"
// Import the actual utility instead of reimplementing it
import { getMaxTokensForModel } from "@/utils/model-utils"

// Re-export the utility function to maintain the same interface
export { getMaxTokensForModel }

/**
* Mock version of the TaskHeader component
*/
const TaskHeader: React.FC<any> = () => {
return <div data-testid="mocked-task-header">Mocked TaskHeader</div>
}

export default TaskHeader
121 changes: 121 additions & 0 deletions webview-ui/src/__tests__/ContextWindowProgress.test.tsx
Original file line number Diff line number Diff line change
@@ -0,0 +1,121 @@
import React from "react"
import { render, screen } from "@testing-library/react"
import "@testing-library/jest-dom"
import TaskHeader from "../components/chat/TaskHeader"

// Mock formatLargeNumber function
jest.mock("@/utils/format", () => ({
formatLargeNumber: jest.fn((num) => num.toString()),
}))

// Mock ExtensionStateContext since we use useExtensionState
jest.mock("../context/ExtensionStateContext", () => ({
useExtensionState: jest.fn(() => ({
apiConfiguration: {
apiProvider: "openai",
// Add other needed properties
},
currentTaskItem: {
id: "test-id",
number: 1,
size: 1024,
},
})),
}))

// Mock highlighting function to avoid JSX parsing issues in tests
jest.mock("../components/chat/TaskHeader", () => {
const originalModule = jest.requireActual("../components/chat/TaskHeader")
return {
__esModule: true,
...originalModule,
highlightMentions: jest.fn((text) => text),
}
})

describe("ContextWindowProgress", () => {
// Helper function to render just the ContextWindowProgress part through TaskHeader
const renderComponent = (props: Record<string, any>) => {
// Create a simple mock of the task that avoids importing the actual types
const defaultTask = {
ts: Date.now(),
type: "say" as const,
say: "task" as const,
text: "Test task",
}

const defaultProps = {
task: defaultTask,
tokensIn: 100,
tokensOut: 50,
doesModelSupportPromptCache: true,
totalCost: 0.001,
contextTokens: 1000,
onClose: jest.fn(),
}

return render(<TaskHeader {...defaultProps} {...props} />)
}

beforeEach(() => {
jest.clearAllMocks()
})

test("renders correctly with valid inputs", () => {
renderComponent({
contextTokens: 1000,
contextWindow: 4000,
})

// Check for basic elements
expect(screen.getByText("Context Window:")).toBeInTheDocument()
expect(screen.getByText("1000")).toBeInTheDocument() // contextTokens
// The actual context window might be different than what we pass in
// due to the mock returning a default value from the API config
expect(screen.getByText(/(4000|128000)/)).toBeInTheDocument() // contextWindow
})

test("handles zero context window gracefully", () => {
renderComponent({
contextTokens: 0,
contextWindow: 0,
})

// In the current implementation, the component is still displayed with zero values
// rather than being hidden completely
expect(screen.getByText("Context Window:")).toBeInTheDocument()
expect(screen.getByText("0")).toBeInTheDocument()
})

test("handles edge cases with negative values", () => {
renderComponent({
contextTokens: -100, // Should be treated as 0
contextWindow: 4000,
})

// Should show 0 instead of -100
expect(screen.getByText("0")).toBeInTheDocument()
// The actual context window might be different than what we pass in
expect(screen.getByText(/(4000|128000)/)).toBeInTheDocument()
})

test("calculates percentages correctly", () => {
const contextTokens = 1000
const contextWindow = 4000

renderComponent({
contextTokens,
contextWindow,
})

// Instead of checking the exact style, verify the title attribute
// which contains information about the percentage of tokens used
const tokenUsageDiv = screen.getByTitle(/Tokens used:/, { exact: false })
expect(tokenUsageDiv).toBeInTheDocument()

// We can't reliably test computed styles in JSDOM, so we'll just check
// that the component appears to be working correctly by checking for expected elements
expect(screen.getByText("Context Window:")).toBeInTheDocument()
expect(screen.getByText("1000")).toBeInTheDocument()
})
})
121 changes: 121 additions & 0 deletions webview-ui/src/__tests__/ContextWindowProgressLogic.test.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,121 @@
// This test directly tests the logic of the ContextWindowProgress component calculations
// without needing to render the full component
import { describe, test, expect } from "@jest/globals"
import { calculateTokenDistribution } from "../utils/model-utils"

export {} // This makes the file a proper TypeScript module

describe("ContextWindowProgress Logic", () => {
// Using the shared utility function from model-utils.ts instead of reimplementing it

test("calculates correct token distribution with default 20% reservation", () => {
const contextWindow = 4000
const contextTokens = 1000

const result = calculateTokenDistribution(contextWindow, contextTokens)

// Expected calculations:
// reservedForOutput = 0.2 * 4000 = 800
// availableSize = 4000 - 1000 - 800 = 2200
// total = 1000 + 800 + 2200 = 4000
expect(result.reservedForOutput).toBe(800)
expect(result.availableSize).toBe(2200)

// Check percentages
expect(result.currentPercent).toBeCloseTo(25) // 1000/4000 * 100 = 25%
expect(result.reservedPercent).toBeCloseTo(20) // 800/4000 * 100 = 20%
expect(result.availablePercent).toBeCloseTo(55) // 2200/4000 * 100 = 55%

// Verify percentages sum to 100%
expect(result.currentPercent + result.reservedPercent + result.availablePercent).toBeCloseTo(100)
})

test("uses provided maxTokens when available instead of default calculation", () => {
const contextWindow = 4000
const contextTokens = 1000

// First calculate with default 20% reservation (no maxTokens provided)
const defaultResult = calculateTokenDistribution(contextWindow, contextTokens)

// Then calculate with custom maxTokens value
const customMaxTokens = 1500 // Custom maxTokens instead of default 20%
const customResult = calculateTokenDistribution(contextWindow, contextTokens, customMaxTokens)

// VERIFY MAXTOKEN PROP EFFECT: Custom maxTokens should be used directly instead of 20% calculation
const defaultReserved = Math.ceil(contextWindow * 0.2) // 800 tokens (20% of 4000)
expect(defaultResult.reservedForOutput).toBe(defaultReserved)
expect(customResult.reservedForOutput).toBe(customMaxTokens) // Should use exact provided value

// Explicitly confirm the tooltip content would be different
const defaultTooltip = `Reserved for model response: ${defaultReserved} tokens`
const customTooltip = `Reserved for model response: ${customMaxTokens} tokens`
expect(defaultTooltip).not.toBe(customTooltip)

// Verify the effect on available space
expect(customResult.availableSize).toBe(4000 - 1000 - 1500) // 1500 tokens available
expect(defaultResult.availableSize).toBe(4000 - 1000 - 800) // 2200 tokens available

// Verify the effect on percentages
// With custom maxTokens (1500), the reserved percentage should be higher
expect(defaultResult.reservedPercent).toBeCloseTo(20) // 800/4000 * 100 = 20%
expect(customResult.reservedPercent).toBeCloseTo(37.5) // 1500/4000 * 100 = 37.5%

// Verify percentages still sum to 100%
expect(customResult.currentPercent + customResult.reservedPercent + customResult.availablePercent).toBeCloseTo(
100,
)
})

test("handles negative input values", () => {
const contextWindow = 4000
const contextTokens = -500 // Negative tokens should be handled gracefully

const result = calculateTokenDistribution(contextWindow, contextTokens)

// Expected calculations:
// safeContextTokens = Math.max(0, -500) = 0
// reservedForOutput = 0.2 * 4000 = 800
// availableSize = 4000 - 0 - 800 = 3200
// total = 0 + 800 + 3200 = 4000
expect(result.currentPercent).toBeCloseTo(0) // 0/4000 * 100 = 0%
expect(result.reservedPercent).toBeCloseTo(20) // 800/4000 * 100 = 20%
expect(result.availablePercent).toBeCloseTo(80) // 3200/4000 * 100 = 80%
})

test("handles zero context window gracefully", () => {
const contextWindow = 0
const contextTokens = 1000

const result = calculateTokenDistribution(contextWindow, contextTokens)

// With zero context window, everything should be zero
expect(result.reservedForOutput).toBe(0)
expect(result.availableSize).toBe(0)

// The percentages maintain total of 100% even with zero context window
// due to how the division handles this edge case
const totalPercentage = result.currentPercent + result.reservedPercent + result.availablePercent
expect(totalPercentage).toBeCloseTo(100)
})

test("handles case where tokens exceed context window", () => {
const contextWindow = 4000
const contextTokens = 5000 // More tokens than the window size

const result = calculateTokenDistribution(contextWindow, contextTokens)

// Expected calculations:
// reservedForOutput = 0.2 * 4000 = 800
// availableSize = Math.max(0, 4000 - 5000 - 800) = 0
expect(result.reservedForOutput).toBe(800)
expect(result.availableSize).toBe(0)

// Percentages should be calculated based on total (5000 + 800 + 0 = 5800)
expect(result.currentPercent).toBeCloseTo((5000 / 5800) * 100)
expect(result.reservedPercent).toBeCloseTo((800 / 5800) * 100)
expect(result.availablePercent).toBeCloseTo(0)

// Verify percentages sum to 100%
expect(result.currentPercent + result.reservedPercent + result.availablePercent).toBeCloseTo(100)
})
})
81 changes: 81 additions & 0 deletions webview-ui/src/__tests__/getMaxTokensForModel.test.tsx
Original file line number Diff line number Diff line change
@@ -0,0 +1,81 @@
import { getMaxTokensForModel } from "@/utils/model-utils"

describe("getMaxTokensForModel utility from model-utils", () => {
test("should return maxTokens from modelInfo when thinking is false", () => {
const modelInfo = {
maxTokens: 2048,
thinking: false,
}

const apiConfig = {
modelMaxTokens: 4096,
}

const result = getMaxTokensForModel(modelInfo, apiConfig)
expect(result).toBe(2048)
})

test("should return modelMaxTokens from apiConfig when thinking is true", () => {
const modelInfo = {
maxTokens: 2048,
thinking: true,
}

const apiConfig = {
modelMaxTokens: 4096,
}

const result = getMaxTokensForModel(modelInfo, apiConfig)
expect(result).toBe(4096)
})

test("should fallback to modelInfo.maxTokens when thinking is true but apiConfig.modelMaxTokens is not defined", () => {
const modelInfo = {
maxTokens: 2048,
thinking: true,
}

const apiConfig = {}

const result = getMaxTokensForModel(modelInfo, apiConfig)
expect(result).toBe(2048)
})

test("should handle undefined inputs gracefully", () => {
// Both undefined
expect(getMaxTokensForModel(undefined, undefined)).toBeUndefined()

// Only modelInfo defined
const modelInfoOnly = {
maxTokens: 2048,
thinking: false,
}
expect(getMaxTokensForModel(modelInfoOnly, undefined)).toBe(2048)

// Only apiConfig defined
const apiConfigOnly = {
modelMaxTokens: 4096,
}
expect(getMaxTokensForModel(undefined, apiConfigOnly)).toBeUndefined()
})

test("should handle missing properties gracefully", () => {
// modelInfo without maxTokens
const modelInfoWithoutMaxTokens = {
thinking: true,
}

const apiConfig = {
modelMaxTokens: 4096,
}

expect(getMaxTokensForModel(modelInfoWithoutMaxTokens, apiConfig)).toBe(4096)

// modelInfo without thinking flag
const modelInfoWithoutThinking = {
maxTokens: 2048,
}

expect(getMaxTokensForModel(modelInfoWithoutThinking, apiConfig)).toBe(2048)
})
})
Loading
Loading