@@ -20,6 +20,20 @@ interface UnboundUsage extends OpenAI.CompletionUsage {
 	cache_read_input_tokens?: number
 }
 
+type UnboundChatCompletionCreateParamsStreaming = OpenAI.Chat.Completions.ChatCompletionCreateParamsStreaming & {
+	unbound_metadata: {
+		originApp: string
+		taskId?: string
+		mode?: string
+	}
+}
+
+type UnboundChatCompletionCreateParamsNonStreaming = OpenAI.Chat.Completions.ChatCompletionCreateParamsNonStreaming & {
+	unbound_metadata: {
+		originApp: string
+	}
+}
+
 export class UnboundHandler extends RouterProvider implements SingleCompletionHandler {
 	constructor(options: ApiHandlerOptions) {
 		super({
@@ -60,11 +74,16 @@ export class UnboundHandler extends RouterProvider implements SingleCompletionHandler {
 			maxTokens = info.maxTokens ?? undefined
 		}
 
-		const requestOptions: OpenAI.Chat.Completions.ChatCompletionCreateParamsStreaming = {
+		const requestOptions: UnboundChatCompletionCreateParamsStreaming = {
 			model: modelId.split("/")[1],
 			max_tokens: maxTokens,
 			messages: openAiMessages,
 			stream: true,
+			unbound_metadata: {
+				originApp: "roo-code",
+				taskId: metadata?.taskId,
+				mode: metadata?.mode,
+			},
 		}
 
 		if (this.supportsTemperature(modelId)) {
@@ -108,9 +127,12 @@ export class UnboundHandler extends RouterProvider implements SingleCompletionHandler {
 		const { id: modelId, info } = await this.fetchModel()
 
 		try {
-			const requestOptions: OpenAI.Chat.Completions.ChatCompletionCreateParamsNonStreaming = {
+			const requestOptions: UnboundChatCompletionCreateParamsNonStreaming = {
 				model: modelId.split("/")[1],
 				messages: [{ role: "user", content: prompt }],
+				unbound_metadata: {
+					originApp: "roo-code",
+				},
 			}
 
 			if (this.supportsTemperature(modelId)) {
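For context, a minimal sketch of how the widened request type behaves. Because `unbound_metadata` is added via a type intersection, a value of the new type still satisfies the OpenAI SDK's `ChatCompletionCreateParamsStreaming`, and the SDK serializes the extra key into the JSON request body for the Unbound gateway to read. The client setup, base URL, and model name below are illustrative assumptions, not part of this diff:

```ts
import OpenAI from "openai"

// Same shape as the type added in this diff.
type UnboundChatCompletionCreateParamsStreaming =
	OpenAI.Chat.Completions.ChatCompletionCreateParamsStreaming & {
		unbound_metadata: {
			originApp: string
			taskId?: string
			mode?: string
		}
	}

async function demo() {
	// Hypothetical client setup; the real handler gets its base URL and
	// API key from ApiHandlerOptions.
	const client = new OpenAI({
		baseURL: "https://api.getunbound.ai/v1", // assumption, not from the diff
		apiKey: process.env.UNBOUND_API_KEY ?? "",
	})

	const params: UnboundChatCompletionCreateParamsStreaming = {
		model: "anthropic/claude-3-5-sonnet".split("/")[1], // mirrors modelId.split("/")[1]
		messages: [{ role: "user", content: "Hello" }],
		stream: true,
		unbound_metadata: { originApp: "roo-code", taskId: "task-123" },
	}

	// Excess-property checks don't apply here because `params` is a typed
	// variable rather than an object literal, so the intersection type is
	// assignable to the SDK's streaming params; the unrecognized
	// `unbound_metadata` field is simply included in the POST body.
	const stream = await client.chat.completions.create(params)

	for await (const chunk of stream) {
		process.stdout.write(chunk.choices[0]?.delta?.content ?? "")
	}
}
```

Defining the metadata as an intersection rather than patching the SDK types keeps the two request shapes (streaming with optional `taskId`/`mode`, non-streaming with only `originApp`) independently checkable at the call sites changed above.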