@@ -14,6 +14,11 @@ interface OpenAIResponse {
       content?: string;
     };
   }>;
+  usage?: {
+    total_tokens: number;
+    prompt_tokens: number;
+    completion_tokens: number;
+  };
   error?: {
     message: string;
   };
@@ -29,6 +34,27 @@ interface OpenAIRequest {
   max_tokens?: number;
 }
 
+interface TokenUsage {
+  totalTokens: number;
+  requestCount: number;
+}
+
+async function updateTokenUsage(newTokens: number): Promise<void> {
+  const result = await new Promise<{ tokenUsage?: TokenUsage }>((resolve) => {
+    chrome.storage.local.get(['tokenUsage'], resolve);
+  });
+
+  const currentUsage = result.tokenUsage || { totalTokens: 0, requestCount: 0 };
+  const updatedUsage = {
+    totalTokens: currentUsage.totalTokens + newTokens,
+    requestCount: currentUsage.requestCount + 1
+  };
+
+  await new Promise<void>((resolve) => {
+    chrome.storage.local.set({ tokenUsage: updatedUsage }, resolve);
+  });
+}
+
 export async function handleQuestion(question: string, context: string, model?: ModelType): Promise<void> {
   try {
     // Check if the question is a shortcut
@@ -50,6 +76,32 @@ export async function handleQuestion(question: string, context: string, model?:
     const selectedModel = model || settings.model;
     await addMessage('assistant', '', selectedModel);
 
+    // First make a non-streaming request to get token usage
+    const nonStreamingResponse = await fetch(`${settings.apiUrl}/chat/completions`, {
+      method: 'POST',
+      headers: {
+        'Content-Type': 'application/json',
+        'Authorization': `Bearer ${settings.apiKey}`
+      },
+      body: JSON.stringify({
+        model: selectedModel,
+        messages: [
+          { role: 'system', content: systemMessage },
+          { role: 'user', content: finalQuestion }
+        ],
+        stream: false
+      } as OpenAIRequest)
+    });
+
+    if (!nonStreamingResponse.ok) {
+      const data = await nonStreamingResponse.json();
+      throw new Error(data.error?.message || 'Failed to get response from OpenAI');
+    }
+
+    const nonStreamingData = await nonStreamingResponse.json();
+    const totalTokens = nonStreamingData.usage?.total_tokens || 0;
+
+    // Now make the streaming request for the actual response
     const response = await fetch(`${settings.apiUrl}/chat/completions`, {
       method: 'POST',
       headers: {
@@ -100,15 +152,17 @@ export async function handleQuestion(question: string, context: string, model?:
           }
         }
       }
+
+      // Update token usage after successful completion
+      await updateTokenUsage(totalTokens);
     } catch (error) {
       console.error('Error reading stream:', error);
       throw error;
     } finally {
       reader.releaseLock();
    }
-
   } catch (error) {
-    console.error('Error handling question:', error);
-    await addMessage('assistant', `Error: ${error instanceof Error ? error.message : 'Unknown error occurred'}`, model || (await getSettings()).model);
+    console.error('Error in handleQuestion:', error);
+    throw error;
   }
 }
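
For reference, the counters written by `updateTokenUsage` can be read back from `chrome.storage.local` anywhere else in the extension, e.g. a popup that displays running totals. A minimal sketch, assuming the same `tokenUsage` key and `TokenUsage` shape as in the diff; the `#usage` element is a hypothetical placeholder, not part of this change:

```ts
// Sketch: read the cumulative counters written by updateTokenUsage().
// Assumes the 'tokenUsage' key and TokenUsage shape from the diff above;
// the #usage element is a hypothetical node in a popup page.
interface TokenUsage {
  totalTokens: number;
  requestCount: number;
}

function readTokenUsage(): Promise<TokenUsage> {
  return new Promise((resolve) => {
    chrome.storage.local.get(['tokenUsage'], (result) => {
      resolve(result.tokenUsage ?? { totalTokens: 0, requestCount: 0 });
    });
  });
}

readTokenUsage().then(({ totalTokens, requestCount }) => {
  const el = document.getElementById('usage');
  if (el) el.textContent = `${totalTokens} tokens across ${requestCount} requests`;
});
```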
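One cost caveat worth noting: as written, every question is sent twice, once non-streaming (only to read `usage.total_tokens`) and once streaming, so each request is billed roughly twice and the recorded count belongs to the first completion rather than the one shown to the user. OpenAI's Chat Completions API can instead report usage on the streaming call itself via `stream_options: { include_usage: true }`, which appends a final chunk carrying the `usage` object. A sketch of that single-request variant, assuming an OpenAI-compatible endpoint that honors this option; `streamWithUsage` and its parameters are illustrative, not part of the diff:

```ts
// Sketch: stream the completion and capture token usage from the final chunk,
// avoiding the second non-streaming request. Assumes the endpoint supports
// stream_options.include_usage (OpenAI's Chat Completions API does).
async function streamWithUsage(
  apiUrl: string,
  apiKey: string,
  payload: Record<string, unknown>,
  onToken: (text: string) => void
): Promise<number> {
  const response = await fetch(`${apiUrl}/chat/completions`, {
    method: 'POST',
    headers: {
      'Content-Type': 'application/json',
      'Authorization': `Bearer ${apiKey}`
    },
    body: JSON.stringify({
      ...payload,
      stream: true,
      stream_options: { include_usage: true }
    })
  });
  if (!response.ok || !response.body) {
    throw new Error('Failed to get response from OpenAI');
  }

  const reader = response.body.getReader();
  const decoder = new TextDecoder();
  let totalTokens = 0;
  let buffer = '';
  try {
    while (true) {
      const { done, value } = await reader.read();
      if (done) break;
      buffer += decoder.decode(value, { stream: true });
      const lines = buffer.split('\n');
      buffer = lines.pop() ?? ''; // keep any partial SSE line for the next read
      for (const line of lines) {
        if (!line.startsWith('data: ') || line.includes('[DONE]')) continue;
        const chunk = JSON.parse(line.slice('data: '.length));
        const delta = chunk.choices?.[0]?.delta?.content;
        if (delta) onToken(delta);
        // With include_usage set, the final data chunk carries a usage object.
        if (chunk.usage) totalTokens = chunk.usage.total_tokens;
      }
    }
  } finally {
    reader.releaseLock();
  }
  return totalTokens;
}
```

The returned total could then be passed straight to `updateTokenUsage(totalTokens)` after the stream completes.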