Skip to content

Commit 7511374

Browse files
committed
feat: add token estimation for context
1 parent 4fbfec7 commit 7511374

File tree

10 files changed

+857
-13
lines changed

10 files changed

+857
-13
lines changed

package-lock.json

Lines changed: 10 additions & 3 deletions
Some generated files are not rendered by default. Learn more about customizing how changed files appear on GitHub.

package.json

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -31,6 +31,7 @@
3131
"webpack-cli": "^5.1.4"
3232
},
3333
"dependencies": {
34-
"marked": "^15.0.4"
34+
"marked": "^15.0.4",
35+
"tiktoken": "^1.0.21"
3536
}
3637
}

src/components/chat/messageHandler.ts

Lines changed: 5 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -7,6 +7,7 @@ import { createAPIClient, type LLMRequest, type APIError } from '../../utils/api
77
import { createRateLimiter, type RateLimiter } from '../../utils/rateLimiter';
88
import { modelManager } from '../../utils/modelManager';
99
import type { ModelType } from '../../config';
10+
import { estimateSimpleTokens } from '../../utils/tokenCounter';
1011

1112
interface TokenUsage {
1213
totalTokens: number;
@@ -71,8 +72,8 @@ function createUserFriendlyErrorMessage(error: APIError): string {
7172
}
7273
}
7374

74-
function estimateTokens(messages: any[]): number {
75-
// Rough estimation: 1 token ≈ 4 characters for text
75+
async function estimateTokensFromMessages(messages: any[], model: string): Promise<number> {
76+
// Use fallback estimation for now to avoid async complexity in rate limiting
7677
let totalChars = 0;
7778

7879
for (const message of messages) {
@@ -83,14 +84,12 @@ function estimateTokens(messages: any[]): number {
8384
if (item.type === 'text' && item.text) {
8485
totalChars += item.text.length;
8586
} else if (item.type === 'image_url') {
86-
// Images typically use more tokens, estimate based on resolution
87-
totalChars += 1000; // Base estimate for image processing
87+
totalChars += 4000; // Estimate for image tokens (roughly 1000 tokens * 4 chars/token)
8888
}
8989
}
9090
}
9191
}
9292

93-
// Add some buffer for response tokens and system overhead
9493
return Math.ceil(totalChars / 4) + 1000;
9594
}
9695

@@ -168,7 +167,7 @@ export async function handleQuestion(question: string, context: string, model?:
168167
// Check rate limits before making the request
169168
const apiUrl = config.apiUrl;
170169
const limiter = getRateLimiter(apiUrl);
171-
const estimatedTokens = estimateTokens(messages);
170+
const estimatedTokens = await estimateTokensFromMessages(messages, selectedModel);
172171

173172
const rateLimitCheck = await limiter.canMakeRequest(estimatedTokens);
174173
if (!rateLimitCheck.allowed) {

src/components/context/contextModes.ts

Lines changed: 41 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -84,9 +84,17 @@ function displayImage(src: string, container: HTMLElement | null): void {
8484
</div>
8585
`;
8686
currentScreenshot = null;
87+
88+
// Trigger token indicator update when image is cleared
89+
const tokenUpdateEvent = new CustomEvent('contextUpdate');
90+
document.dispatchEvent(tokenUpdateEvent);
8791
}
8892
});
8993
}
94+
95+
// Trigger token indicator update when image is displayed
96+
const tokenUpdateEvent = new CustomEvent('contextUpdate');
97+
document.dispatchEvent(tokenUpdateEvent);
9098
}
9199

92100
function isYouTubePage(): boolean {
@@ -316,6 +324,10 @@ function updateElementPreview(): void {
316324
startElementSelection();
317325
});
318326
}
327+
328+
// Trigger token indicator update when element preview is updated
329+
const tokenUpdateEvent = new CustomEvent('contextUpdate');
330+
document.dispatchEvent(tokenUpdateEvent);
319331
}
320332

321333
function showElementSelectionInstructions(): void {
@@ -830,13 +842,21 @@ async function fetchVideoSubtitles(): Promise<void> {
830842
} else {
831843
videoSubtitles = t('sidebar.preview.videoOnly');
832844
}
845+
846+
// Trigger token indicator update when video subtitles are fetched
847+
const tokenUpdateEvent = new CustomEvent('contextUpdate');
848+
document.dispatchEvent(tokenUpdateEvent);
833849
}
834850

835851
function clearPreview(): void {
836852
if (contentPreviewElement) {
837853
contentPreviewElement.textContent = t('sidebar.preview.noSelection');
838854
}
839855
lastSelection = '';
856+
857+
// Trigger token indicator update when context is cleared
858+
const tokenUpdateEvent = new CustomEvent('contextUpdate');
859+
document.dispatchEvent(tokenUpdateEvent);
840860
}
841861

842862
function updatePreview(text: string): void {
@@ -846,6 +866,10 @@ function updatePreview(text: string): void {
846866
? text.substring(0, 50) + '...'
847867
: text;
848868
}
869+
870+
// Trigger token indicator update when context changes
871+
const tokenUpdateEvent = new CustomEvent('contextUpdate');
872+
document.dispatchEvent(tokenUpdateEvent);
849873
}
850874

851875
function handleSelectionChange(): void {
@@ -881,6 +905,10 @@ function handleSelectionChange(): void {
881905
clearPreview();
882906
}
883907
}
908+
909+
// Trigger token indicator update when selection changes
910+
const tokenUpdateEvent = new CustomEvent('contextUpdate');
911+
document.dispatchEvent(tokenUpdateEvent);
884912
}
885913

886914
function setupEventListeners(contentPreview: HTMLElement): void {
@@ -1072,6 +1100,10 @@ async function updateModeUI(mode: ContextMode, screenshotBtn: HTMLElement, dropZ
10721100
} else if (mode === 'video') {
10731101
await updateVideoUI(preview);
10741102
}
1103+
1104+
// Trigger token indicator update when mode UI is updated
1105+
const tokenUpdateEvent = new CustomEvent('contextUpdate');
1106+
document.dispatchEvent(tokenUpdateEvent);
10751107
}
10761108

10771109
export function setupContextModes(): void {
@@ -1124,11 +1156,19 @@ export function setupContextModes(): void {
11241156
console.error('Failed to update mode UI:', error);
11251157
contentPreview.textContent = 'Failed to update content';
11261158
});
1159+
1160+
// Trigger token indicator update when mode changes
1161+
const tokenUpdateEvent = new CustomEvent('contextUpdate');
1162+
document.dispatchEvent(tokenUpdateEvent);
11271163
});
11281164
});
11291165

11301166
// Initialize with page content
1131-
updateModeUI('page', screenshotBtn, dropZone, contentPreview);
1167+
updateModeUI('page', screenshotBtn, dropZone, contentPreview).then(() => {
1168+
// Trigger initial token indicator update
1169+
const tokenUpdateEvent = new CustomEvent('contextUpdate');
1170+
document.dispatchEvent(tokenUpdateEvent);
1171+
});
11321172

11331173
// Set up screenshot button
11341174
screenshotBtn.addEventListener('click', async () => {

src/components/ui/sidebar.ts

Lines changed: 83 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -10,6 +10,8 @@ import { handleQuestion } from '../chat/messageHandler';
1010
import { clearChatHistory } from '../chat/chatHistory';
1111
import { modelManager } from '../../utils/modelManager';
1212
import { initializeLanguage, t } from '../../utils/i18n';
13+
import { estimateSimpleTokens, getTokenCountWithStatus, type TokenEstimate } from '../../utils/tokenCounter';
14+
import { simpleTokenEstimate, getSimpleTokenStatus } from '../../utils/simpleTokenCounter';
1315

1416
interface Position {
1517
x: number;
@@ -320,6 +322,10 @@ export async function createSidebar(): Promise<void> {
320322
<div id="ai-answer"></div>
321323
<div class="ai-input-section">
322324
<textarea id="ai-question" placeholder="${t('sidebar.askPlaceholder')}" rows="4"></textarea>
325+
<div class="ai-token-indicator" id="ai-token-indicator">
326+
<span class="ai-token-count" id="ai-token-count">0 tokens</span>
327+
<span class="ai-token-status" id="ai-token-status"></span>
328+
</div>
323329
<div class="ai-bottom-controls">
324330
<button id="ai-ask-button">${t('sidebar.askButton')}</button>
325331
<select id="ai-model-selector" class="ai-model-selector"></select>
@@ -380,6 +386,58 @@ function setupEventListeners(): void {
380386
initializeModelSelector(modelSelector);
381387
}
382388

389+
// Token counting function
390+
async function updateTokenIndicator(): Promise<void> {
391+
const question = questionInput?.value.trim() || '';
392+
const selectedModel = modelSelector?.value as ModelType || 'gpt-4o-mini';
393+
const tokenCount = document.getElementById('ai-token-count');
394+
const tokenStatus = document.getElementById('ai-token-status');
395+
const tokenIndicator = document.getElementById('ai-token-indicator');
396+
397+
if (!tokenCount || !tokenStatus || !tokenIndicator) return;
398+
399+
// if (!question) {
400+
// tokenCount.textContent = '0 tokens';
401+
// tokenStatus.textContent = '';
402+
// tokenIndicator.className = 'ai-token-indicator';
403+
// return;
404+
// }
405+
406+
// Show loading state
407+
tokenCount.textContent = 'Counting...';
408+
tokenStatus.textContent = '';
409+
tokenIndicator.className = 'ai-token-indicator';
410+
411+
try {
412+
const content = getPageContent();
413+
414+
// Try tiktoken first, fallback to simple estimation
415+
try {
416+
const estimate = await estimateSimpleTokens(question, content, selectedModel);
417+
const { text, status } = getTokenCountWithStatus(estimate);
418+
419+
tokenCount.textContent = text;
420+
tokenStatus.textContent = estimate.warning || '';
421+
tokenIndicator.className = `ai-token-indicator ai-token-${status}`;
422+
} catch (tiktokenError) {
423+
console.warn('Tiktoken failed, using simple estimation:', tiktokenError);
424+
425+
// Fallback to simple estimation
426+
const estimate = simpleTokenEstimate(question, content, selectedModel);
427+
const { text, status } = getSimpleTokenStatus(estimate);
428+
429+
tokenCount.textContent = text;
430+
tokenStatus.textContent = estimate.warning || 'Using estimated count';
431+
tokenIndicator.className = `ai-token-indicator ai-token-${status}`;
432+
}
433+
} catch (error) {
434+
console.warn('All token counting methods failed:', error);
435+
tokenCount.textContent = 'Token count unavailable';
436+
tokenStatus.textContent = 'Error loading token counter';
437+
tokenIndicator.className = 'ai-token-indicator';
438+
}
439+
}
440+
383441
// Set initial theme
384442
const prefersDark = window.matchMedia('(prefers-color-scheme: dark)').matches;
385443
let currentTheme: Theme = (localStorage.getItem('theme') as Theme) || (prefersDark ? 'dark' : 'light');
@@ -439,6 +497,31 @@ function setupEventListeners(): void {
439497
}
440498
});
441499

500+
// Update token count when question changes
501+
questionInput?.addEventListener('input', debouncedTokenUpdate);
502+
503+
// Update token count when model changes
504+
modelSelector?.addEventListener('change', debouncedTokenUpdate);
505+
506+
// Debounce function to prevent too many rapid token updates
507+
let tokenUpdateTimeout: number | undefined;
508+
function debouncedTokenUpdate() {
509+
if (tokenUpdateTimeout) {
510+
clearTimeout(tokenUpdateTimeout);
511+
}
512+
tokenUpdateTimeout = window.setTimeout(() => {
513+
updateTokenIndicator();
514+
}, 100); // 100ms debounce
515+
}
516+
517+
// Update token count when context changes
518+
document.addEventListener('contextUpdate', debouncedTokenUpdate);
519+
520+
// Initial token count update
521+
setTimeout(() => {
522+
debouncedTokenUpdate();
523+
}, 200);
524+
442525
// Clear chat confirmation modal
443526
const confirmButton = modal?.querySelector('.ai-confirm-button');
444527
const cancelButton = modal?.querySelector('.ai-cancel-button');

0 commit comments

Comments
 (0)