Skip to content

Commit ac9d732

Browse files
Updated the Groq chat node and also Llama Index Groq node. #4383 (#4384)
* adding DeepSeek R1 distill to groq * Added max_tokens to groq.ts and chatGroqLlamaindex.ts plus updated groq models removing the outdated models and adding new models such as compound-beta * Patched OpenAI typo on ChatGroq_LLamaIndex.ts * Patching groq llamaindex * Patched pnpm lint error * Removed redundant image * Update ChatGroq_LlamaIndex.ts --------- Co-authored-by: Henry Heng <[email protected]>
1 parent d75e847 commit ac9d732

File tree

3 files changed

+44
-26
lines changed

3 files changed

+44
-26
lines changed

packages/components/models.json

Lines changed: 24 additions & 24 deletions
Original file line numberDiff line numberDiff line change
@@ -668,6 +668,22 @@
668668
{
669669
"name": "groqChat",
670670
"models": [
671+
{
672+
"label": "meta-llama/llama-4-maverick-17b-128e-instruct",
673+
"name": "meta-llama/llama-4-maverick-17b-128e-instruct"
674+
},
675+
{
676+
"label": "meta-llama/llama-4-scout-17b-16e-instruct",
677+
"name": "meta-llama/llama-4-scout-17b-16e-instruct"
678+
},
679+
{
680+
"label": "compound-beta",
681+
"name": "compound-beta"
682+
},
683+
{
684+
"label": "compound-beta-mini",
685+
"name": "compound-beta-mini"
686+
},
671687
{
672688
"label": "deepseek-r1-distill-llama-70b",
673689
"name": "deepseek-r1-distill-llama-70b"
@@ -696,29 +712,13 @@
696712
"label": "llama-3.2-90b-text-preview",
697713
"name": "llama-3.2-90b-text-preview"
698714
},
699-
{
700-
"label": "llama-3.1-405b-reasoning",
701-
"name": "llama-3.1-405b-reasoning"
702-
},
703-
{
704-
"label": "llama-3.1-70b-versatile",
705-
"name": "llama-3.1-70b-versatile"
706-
},
707715
{
708716
"label": "llama-3.1-8b-instant",
709717
"name": "llama-3.1-8b-instant"
710718
},
711719
{
712-
"label": "llama3-groq-70b-8192-tool-use-preview",
713-
"name": "llama3-groq-70b-8192-tool-use-preview"
714-
},
715-
{
716-
"label": "llama3-groq-8b-8192-tool-use-preview",
717-
"name": "llama3-groq-8b-8192-tool-use-preview"
718-
},
719-
{
720-
"label": "gemma-7b-it",
721-
"name": "gemma-7b-it"
720+
"label": "gemma-2-9b-it",
721+
"name": "gemma-2-9b-it"
722722
},
723723
{
724724
"label": "llama3-70b-8192",
@@ -729,16 +729,16 @@
729729
"name": "llama3-8b-8192"
730730
},
731731
{
732-
"label": "mixtral-8x7b-32768",
733-
"name": "mixtral-8x7b-32768"
732+
"label": "mistral-saba-24b",
733+
"name": "mistral-saba-24b"
734734
},
735735
{
736-
"label": "meta-llama/llama-4-maverick-17b-128e-instruct",
737-
"name": "meta-llama/llama-4-maverick-17b-128e-instruct"
736+
"label": "qwen-qwq-32b",
737+
"name": "qwen-qwq-32b"
738738
},
739739
{
740-
"label": "meta-llama/llama-4-scout-17b-16e-instruct",
741-
"name": "meta-llama/llama-4-scout-17b-16e-instruct"
740+
"label": "allam-2-7b",
741+
"name": "allam-2-7b"
742742
}
743743
]
744744
},

packages/components/nodes/chatmodels/Groq/ChatGroq_LlamaIndex.ts

Lines changed: 10 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -48,6 +48,14 @@ class ChatGroq_LlamaIndex_ChatModels implements INode {
4848
step: 0.1,
4949
default: 0.9,
5050
optional: true
51+
},
52+
{
53+
label: 'Max Tokens',
54+
name: 'maxTokens',
55+
type: 'number',
56+
step: 1,
57+
optional: true,
58+
additionalParams: true
5159
}
5260
]
5361
}
@@ -62,7 +70,7 @@ class ChatGroq_LlamaIndex_ChatModels implements INode {
6270
async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
6371
const temperature = nodeData.inputs?.temperature as string
6472
const modelName = nodeData.inputs?.modelName as string
65-
73+
const maxTokens = nodeData.inputs?.maxTokens as string
6674
const credentialData = await getCredentialData(nodeData.credential ?? '', options)
6775
const groqApiKey = getCredentialParam('groqApiKey', credentialData, nodeData)
6876

@@ -71,7 +79,7 @@ class ChatGroq_LlamaIndex_ChatModels implements INode {
7179
model: modelName,
7280
apiKey: groqApiKey
7381
}
74-
82+
if (maxTokens) obj.maxTokens = parseInt(maxTokens, 10)
7583
const model = new Groq(obj)
7684
return model
7785
}

packages/components/nodes/chatmodels/Groq/Groq.ts

Lines changed: 10 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -54,6 +54,14 @@ class Groq_ChatModels implements INode {
5454
default: 0.9,
5555
optional: true
5656
},
57+
{
58+
label: 'Max Tokens',
59+
name: 'maxTokens',
60+
type: 'number',
61+
step: 1,
62+
optional: true,
63+
additionalParams: true
64+
},
5765
{
5866
label: 'Streaming',
5967
name: 'streaming',
@@ -73,6 +81,7 @@ class Groq_ChatModels implements INode {
7381

7482
async init(nodeData: INodeData, _: string, options: ICommonObject): Promise<any> {
7583
const modelName = nodeData.inputs?.modelName as string
84+
const maxTokens = nodeData.inputs?.maxTokens as string
7685
const cache = nodeData.inputs?.cache as BaseCache
7786
const temperature = nodeData.inputs?.temperature as string
7887
const streaming = nodeData.inputs?.streaming as boolean
@@ -86,6 +95,7 @@ class Groq_ChatModels implements INode {
8695
apiKey: groqApiKey,
8796
streaming: streaming ?? true
8897
}
98+
if (maxTokens) obj.maxTokens = parseInt(maxTokens, 10)
8999
if (cache) obj.cache = cache
90100

91101
const model = new ChatGroq(obj)

0 commit comments

Comments
 (0)