Commit 78b7455

Add metadata to create message (#3832)
1 parent 503c758 commit 78b7455

22 files changed, +162 −45 lines

.changeset/hot-lies-flash.md

Lines changed: 5 additions & 0 deletions

@@ -0,0 +1,5 @@
+---
+"roo-cline": patch
+---
+
+Add metadata to create message

src/api/index.ts

Lines changed: 10 additions & 1 deletion

@@ -29,8 +29,17 @@ export interface SingleCompletionHandler {
 	completePrompt(prompt: string): Promise<string>
 }
 
+export interface ApiHandlerCreateMessageMetadata {
+	mode?: string
+	taskId: string
+}
+
 export interface ApiHandler {
-	createMessage(systemPrompt: string, messages: Anthropic.Messages.MessageParam[]): ApiStream
+	createMessage(
+		systemPrompt: string,
+		messages: Anthropic.Messages.MessageParam[],
+		metadata?: ApiHandlerCreateMessageMetadata,
+	): ApiStream
 
 	getModel(): { id: string; info: ModelInfo }
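
For orientation, a minimal sketch of how a caller might use the widened signature. The runTask helper and the chunk handling are illustrative assumptions, not part of this commit; only ApiHandler, ApiHandlerCreateMessageMetadata, and the createMessage shape come from the diff above.

	import { Anthropic } from "@anthropic-ai/sdk"
	import type { ApiHandler, ApiHandlerCreateMessageMetadata } from "./index"

	// Hypothetical call site: any ApiHandler implementation will do.
	async function runTask(handler: ApiHandler, taskId: string): Promise<string> {
		const systemPrompt = "You are a helpful assistant."
		const messages: Anthropic.Messages.MessageParam[] = [{ role: "user", content: "Hello" }]

		// taskId is required, mode is optional, and the whole argument may be
		// omitted, so existing call sites keep compiling unchanged.
		const metadata: ApiHandlerCreateMessageMetadata = { taskId, mode: "code" }

		let output = ""
		for await (const chunk of handler.createMessage(systemPrompt, messages, metadata)) {
			// Assumes ApiStream yields chunks tagged with a `type` discriminator.
			if (chunk.type === "text") {
				output += chunk.text
			}
		}
		return output
	}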

src/api/providers/anthropic-vertex.ts

Lines changed: 6 additions & 2 deletions

@@ -11,7 +11,7 @@ import { getModelParams } from "../transform/model-params"
 
 import { ANTHROPIC_DEFAULT_MAX_TOKENS } from "./constants"
 import { BaseProvider } from "./base-provider"
-import type { SingleCompletionHandler } from "../index"
+import type { SingleCompletionHandler, ApiHandlerCreateMessageMetadata } from "../index"
 
 // https://docs.anthropic.com/en/api/claude-on-vertex-ai
 export class AnthropicVertexHandler extends BaseProvider implements SingleCompletionHandler {
@@ -50,7 +50,11 @@ export class AnthropicVertexHandler extends BaseProvider implements SingleComple
 		}
 	}
 
-	override async *createMessage(systemPrompt: string, messages: Anthropic.Messages.MessageParam[]): ApiStream {
+	override async *createMessage(
+		systemPrompt: string,
+		messages: Anthropic.Messages.MessageParam[],
+		metadata?: ApiHandlerCreateMessageMetadata,
+	): ApiStream {
 		let {
 			id,
 			info: { supportsPromptCache },

src/api/providers/anthropic.ts

Lines changed: 6 additions & 2 deletions

@@ -15,7 +15,7 @@ import { getModelParams } from "../transform/model-params"
 
 import { ANTHROPIC_DEFAULT_MAX_TOKENS } from "./constants"
 import { BaseProvider } from "./base-provider"
-import type { SingleCompletionHandler } from "../index"
+import type { SingleCompletionHandler, ApiHandlerCreateMessageMetadata } from "../index"
 
 export class AnthropicHandler extends BaseProvider implements SingleCompletionHandler {
 	private options: ApiHandlerOptions
@@ -34,7 +34,11 @@ export class AnthropicHandler extends BaseProvider implements SingleCompletionHa
 		})
 	}
 
-	async *createMessage(systemPrompt: string, messages: Anthropic.Messages.MessageParam[]): ApiStream {
+	async *createMessage(
+		systemPrompt: string,
+		messages: Anthropic.Messages.MessageParam[],
+		metadata?: ApiHandlerCreateMessageMetadata,
+	): ApiStream {
 		let stream: AnthropicStream<Anthropic.Messages.RawMessageStreamEvent>
 		const cacheControl: CacheControlEphemeral = { type: "ephemeral" }
 		let { id: modelId, betas = [], maxTokens, temperature, reasoning: thinking } = this.getModel()

src/api/providers/base-openai-compatible-provider.ts

Lines changed: 6 additions & 2 deletions

@@ -5,7 +5,7 @@ import { ApiHandlerOptions, ModelInfo } from "../../shared/api"
 import { ApiStream } from "../transform/stream"
 import { convertToOpenAiMessages } from "../transform/openai-format"
 
-import { SingleCompletionHandler } from "../index"
+import type { SingleCompletionHandler, ApiHandlerCreateMessageMetadata } from "../index"
 import { DEFAULT_HEADERS } from "./constants"
 import { BaseProvider } from "./base-provider"
 
@@ -60,7 +60,11 @@ export abstract class BaseOpenAiCompatibleProvider<ModelName extends string>
 		})
 	}
 
-	override async *createMessage(systemPrompt: string, messages: Anthropic.Messages.MessageParam[]): ApiStream {
+	override async *createMessage(
+		systemPrompt: string,
+		messages: Anthropic.Messages.MessageParam[],
+		metadata?: ApiHandlerCreateMessageMetadata,
+	): ApiStream {
 		const {
 			id: model,
 			info: { maxTokens: max_tokens },

src/api/providers/base-provider.ts

Lines changed: 6 additions & 2 deletions

@@ -2,15 +2,19 @@ import { Anthropic } from "@anthropic-ai/sdk"
 
 import { ModelInfo } from "../../shared/api"
 
-import { ApiHandler } from "../index"
+import type { ApiHandler, ApiHandlerCreateMessageMetadata } from "../index"
 import { ApiStream } from "../transform/stream"
 import { countTokens } from "../../utils/countTokens"
 
 /**
  * Base class for API providers that implements common functionality.
  */
 export abstract class BaseProvider implements ApiHandler {
-	abstract createMessage(systemPrompt: string, messages: Anthropic.Messages.MessageParam[]): ApiStream
+	abstract createMessage(
+		systemPrompt: string,
+		messages: Anthropic.Messages.MessageParam[],
+		metadata?: ApiHandlerCreateMessageMetadata,
+	): ApiStream
 	abstract getModel(): { id: string; info: ModelInfo }
 
 	/**
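
Because the abstract method now carries the optional metadata parameter, every concrete provider override picks it up from here. A hypothetical minimal subclass, sketched under the assumption that ApiStream chunks use a `type` discriminator and that ModelInfo details can be stubbed:

	import { Anthropic } from "@anthropic-ai/sdk"

	import { ModelInfo } from "../../shared/api"
	import type { ApiHandlerCreateMessageMetadata } from "../index"
	import { ApiStream } from "../transform/stream"
	import { BaseProvider } from "./base-provider"

	// Hypothetical provider that echoes the conversation back as one text chunk.
	class EchoProvider extends BaseProvider {
		override async *createMessage(
			systemPrompt: string,
			messages: Anthropic.Messages.MessageParam[],
			metadata?: ApiHandlerCreateMessageMetadata,
		): ApiStream {
			// metadata is optional, so overrides must tolerate its absence.
			const tag = metadata ? `[task ${metadata.taskId}] ` : ""
			yield { type: "text", text: `${tag}${systemPrompt} (${messages.length} messages)` }
		}

		override getModel(): { id: string; info: ModelInfo } {
			// Placeholder values; the real ModelInfo shape lives in shared/api.
			return { id: "echo-model", info: {} as ModelInfo }
		}
	}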

src/api/providers/bedrock.ts

Lines changed: 7 additions & 3 deletions

@@ -9,7 +9,6 @@ import {
 } from "@aws-sdk/client-bedrock-runtime"
 import { fromIni } from "@aws-sdk/credential-providers"
 import { Anthropic } from "@anthropic-ai/sdk"
-import { SingleCompletionHandler } from "../"
 import {
 	BedrockModelId,
 	ModelInfo as SharedModelInfo,
@@ -26,6 +25,7 @@ import { MultiPointStrategy } from "../transform/cache-strategy/multi-point-stra
 import { ModelInfo as CacheModelInfo } from "../transform/cache-strategy/types"
 import { AMAZON_BEDROCK_REGION_INFO } from "../../shared/aws_regions"
 import { convertToBedrockConverseMessages as sharedConverter } from "../transform/bedrock-converse-format"
+import type { SingleCompletionHandler, ApiHandlerCreateMessageMetadata } from "../index"
 
 const BEDROCK_DEFAULT_TEMPERATURE = 0.3
 const BEDROCK_MAX_TOKENS = 4096
@@ -189,7 +189,11 @@ export class AwsBedrockHandler extends BaseProvider implements SingleCompletionH
 		this.client = new BedrockRuntimeClient(clientConfig)
 	}
 
-	override async *createMessage(systemPrompt: string, messages: Anthropic.Messages.MessageParam[]): ApiStream {
+	override async *createMessage(
+		systemPrompt: string,
+		messages: Anthropic.Messages.MessageParam[],
+		metadata?: ApiHandlerCreateMessageMetadata,
+	): ApiStream {
 		let modelConfig = this.getModel()
 		// Handle cross-region inference
 		const usePromptCache = Boolean(this.options.awsUsePromptCache && this.supportsAwsPromptCache(modelConfig))
@@ -769,7 +773,7 @@ export class AwsBedrockHandler extends BaseProvider implements SingleCompletionH
 	> = {
 		ACCESS_DENIED: {
 			patterns: ["access", "denied", "permission"],
-			messageTemplate: `You don't have access to the model specified. 
+			messageTemplate: `You don't have access to the model specified.
 
 Please verify:
 1. Try cross-region inference if you're using a foundation model

src/api/providers/fake-ai.ts

Lines changed: 12 additions & 4 deletions

@@ -1,7 +1,7 @@
 import { Anthropic } from "@anthropic-ai/sdk"
-import { ApiHandler, SingleCompletionHandler } from ".."
 import { ApiHandlerOptions, ModelInfo } from "../../shared/api"
 import { ApiStream } from "../transform/stream"
+import type { ApiHandler, SingleCompletionHandler, ApiHandlerCreateMessageMetadata } from "../index"
 
 interface FakeAI {
 	/**
@@ -18,7 +18,11 @@ interface FakeAI {
 	 */
 	removeFromCache?: () => void
 
-	createMessage(systemPrompt: string, messages: Anthropic.Messages.MessageParam[]): ApiStream
+	createMessage(
+		systemPrompt: string,
+		messages: Anthropic.Messages.MessageParam[],
+		metadata?: ApiHandlerCreateMessageMetadata,
+	): ApiStream
 	getModel(): { id: string; info: ModelInfo }
 	countTokens(content: Array<Anthropic.Messages.ContentBlockParam>): Promise<number>
 	completePrompt(prompt: string): Promise<string>
@@ -52,8 +56,12 @@ export class FakeAIHandler implements ApiHandler, SingleCompletionHandler {
 		this.ai = cachedFakeAi
 	}
 
-	async *createMessage(systemPrompt: string, messages: Anthropic.Messages.MessageParam[]): ApiStream {
-		yield* this.ai.createMessage(systemPrompt, messages)
+	async *createMessage(
+		systemPrompt: string,
+		messages: Anthropic.Messages.MessageParam[],
+		metadata?: ApiHandlerCreateMessageMetadata,
+	): ApiStream {
+		yield* this.ai.createMessage(systemPrompt, messages, metadata)
 	}
 
 	getModel(): { id: string; info: ModelInfo } {
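
FakeAIHandler forwards the optional metadata straight to the injected FakeAI delegate, so test doubles can observe it. A hypothetical stub satisfying the updated interface (names and stub values are illustrative only):

	import { Anthropic } from "@anthropic-ai/sdk"

	import { ModelInfo } from "../../shared/api"
	import type { ApiHandlerCreateMessageMetadata } from "../index"
	import { ApiStream } from "../transform/stream"

	// Hypothetical test double; echoes the metadata so a test can assert it arrived.
	const fakeAi = {
		async *createMessage(
			_systemPrompt: string,
			_messages: Anthropic.Messages.MessageParam[],
			metadata?: ApiHandlerCreateMessageMetadata,
		): ApiStream {
			yield { type: "text", text: `task=${metadata?.taskId ?? "none"} mode=${metadata?.mode ?? "none"}` }
		},
		getModel: (): { id: string; info: ModelInfo } => ({ id: "fake-ai", info: {} as ModelInfo }),
		countTokens: async (_content: Array<Anthropic.Messages.ContentBlockParam>): Promise<number> => 0,
		completePrompt: async (prompt: string): Promise<string> => prompt,
	}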

src/api/providers/gemini.ts

Lines changed: 6 additions & 2 deletions

@@ -10,7 +10,7 @@ import type { JWTInput } from "google-auth-library"
 import { ApiHandlerOptions, ModelInfo, GeminiModelId, geminiDefaultModelId, geminiModels } from "../../shared/api"
 import { safeJsonParse } from "../../shared/safeJsonParse"
 
-import { SingleCompletionHandler } from "../index"
+import type { SingleCompletionHandler, ApiHandlerCreateMessageMetadata } from "../index"
 import { convertAnthropicContentToGemini, convertAnthropicMessageToGemini } from "../transform/gemini-format"
 import type { ApiStream } from "../transform/stream"
 import { BaseProvider } from "./base-provider"
@@ -54,7 +54,11 @@ export class GeminiHandler extends BaseProvider implements SingleCompletionHandl
 			: new GoogleGenAI({ apiKey })
 	}
 
-	async *createMessage(systemInstruction: string, messages: Anthropic.Messages.MessageParam[]): ApiStream {
+	async *createMessage(
+		systemInstruction: string,
+		messages: Anthropic.Messages.MessageParam[],
+		metadata?: ApiHandlerCreateMessageMetadata,
+	): ApiStream {
 		const { id: model, thinkingConfig, maxOutputTokens, info } = this.getModel()
 
 		const contents = messages.map(convertAnthropicMessageToGemini)

src/api/providers/glama.ts

Lines changed: 6 additions & 2 deletions

@@ -9,7 +9,7 @@ import { ApiStream } from "../transform/stream"
 import { convertToOpenAiMessages } from "../transform/openai-format"
 import { addCacheBreakpoints } from "../transform/caching/anthropic"
 
-import { SingleCompletionHandler } from "../index"
+import type { SingleCompletionHandler, ApiHandlerCreateMessageMetadata } from "../index"
 import { RouterProvider } from "./router-provider"
 
 const GLAMA_DEFAULT_TEMPERATURE = 0
@@ -33,7 +33,11 @@ export class GlamaHandler extends RouterProvider implements SingleCompletionHand
 		})
 	}
 
-	override async *createMessage(systemPrompt: string, messages: Anthropic.Messages.MessageParam[]): ApiStream {
+	override async *createMessage(
+		systemPrompt: string,
+		messages: Anthropic.Messages.MessageParam[],
+		metadata?: ApiHandlerCreateMessageMetadata,
+	): ApiStream {
 		const { id: modelId, info } = await this.fetchModel()
 
 		const openAiMessages: OpenAI.Chat.ChatCompletionMessageParam[] = [
