Skip to content

Commit 0627693

Browse files
adding performance config option to the AWS ChatBedrock component (#4777)
* adding performance config option to the AWS ChatBedrock component * Update AWSChatBedrock.ts * Update pnpm-lock.yaml --------- Co-authored-by: Henry Heng <[email protected]>
1 parent bbf6970 commit 0627693

File tree

3 files changed

+38515
-38503
lines changed

3 files changed

+38515
-38503
lines changed

packages/components/nodes/chatmodels/AWSBedrock/AWSChatBedrock.ts

Lines changed: 16 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -23,7 +23,7 @@ class AWSChatBedrock_ChatModels implements INode {
2323
constructor() {
2424
this.label = 'AWS ChatBedrock'
2525
this.name = 'awsChatBedrock'
26-
this.version = 6.0
26+
this.version = 6.1
2727
this.type = 'AWSChatBedrock'
2828
this.icon = 'aws.svg'
2929
this.category = 'Chat Models'
@@ -100,6 +100,16 @@ class AWSChatBedrock_ChatModels implements INode {
100100
'Allow image input. Refer to the <a href="https://docs.flowiseai.com/using-flowise/uploads#image" target="_blank">docs</a> for more details.',
101101
default: false,
102102
optional: true
103+
},
104+
{
105+
label: 'Latency Optimized',
106+
name: 'latencyOptimized',
107+
type: 'boolean',
108+
description:
109+
'Enable latency optimized configuration for supported models. Refer to the supported <a href="https://docs.aws.amazon.com/bedrock/latest/userguide/latency-optimized-inference.html" target="_blank">latency optimized models</a> for more details.',
110+
default: false,
111+
optional: true,
112+
additionalParams: true
103113
}
104114
]
105115
}
@@ -122,6 +132,7 @@ class AWSChatBedrock_ChatModels implements INode {
122132
const iMax_tokens_to_sample = nodeData.inputs?.max_tokens_to_sample as string
123133
const cache = nodeData.inputs?.cache as BaseCache
124134
const streaming = nodeData.inputs?.streaming as boolean
135+
const latencyOptimized = nodeData.inputs?.latencyOptimized as boolean
125136

126137
const obj: ChatBedrockConverseInput = {
127138
region: iRegion,
@@ -131,6 +142,10 @@ class AWSChatBedrock_ChatModels implements INode {
131142
streaming: streaming ?? true
132143
}
133144

145+
if (latencyOptimized) {
146+
obj.performanceConfig = { latency: 'optimized' }
147+
}
148+
134149
/**
135150
* Long-term credentials specified in LLM configuration are optional.
136151
* Bedrock's credential provider falls back to the AWS SDK to fetch

packages/components/package.json

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -38,7 +38,7 @@
3838
"@google/generative-ai": "^0.24.0",
3939
"@huggingface/inference": "^2.6.1",
4040
"@langchain/anthropic": "0.3.14",
41-
"@langchain/aws": "0.1.4",
41+
"@langchain/aws": "^0.1.11",
4242
"@langchain/baidu-qianfan": "^0.1.0",
4343
"@langchain/cohere": "^0.0.7",
4444
"@langchain/community": "^0.3.29",

0 commit comments

Comments
 (0)