
Commit 857c1c5

Merge pull request #350 from AayushGTM6693/feat/llm
Feat(llm): add LLM gating
2 parents b74d5fb + f6e8a24 commit 857c1c5

File tree

27 files changed: +637 -25 lines changed


agent-manager/prisma/schema.prisma

Lines changed: 1 addition & 0 deletions
@@ -26,6 +26,7 @@ model Agent {
     triggers   Trigger[] // One-to-many relationship with Trigger
     template   Template? @relation(fields: [template_id], references: [id], onDelete: SetNull, onUpdate: Cascade)
     secret_key Bytes     @db.ByteA
+    config     Json?
 }
 
 model Trigger {
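
The new nullable config column gives each Agent row a free-form JSON blob. Going by how the agent node reads it later in this commit, a stored value would presumably look like the sketch below (the prompt text is illustrative; only system_prompt is consumed here):

// Illustrative Agent.config value; only system_prompt is read in this commit.
const exampleAgentConfig = {
    system_prompt: 'You are a Cardano autonomous agent. Refuse risky transfers.',
}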

agent-manager/src/repository/agent_manager_repository.ts

Lines changed: 4 additions & 1 deletion
@@ -24,6 +24,7 @@ export async function fetchAgentConfiguration(agentId: string): Promise<
           configurations: any[]
           agentIndex: number
           agentName: string
+          agentConfig: any
       }
     | undefined
 > {
@@ -46,6 +47,8 @@ export async function fetchAgentConfiguration(agentId: string): Promise<
         const instanceCount = Number(agentInstance.instance)
         const agentIndex = Number(agentInstance.index)
         const agentName = agentInstance.name
+        const agentConfig = (agentInstance as any).config || null
+
         const configurationsData = agentConfigurations.map(
             (config: { id: string; type: string; data: JsonValue; action: JsonValue }) => ({
                 id: config.id,
@@ -55,7 +58,7 @@ export async function fetchAgentConfiguration(agentId: string): Promise<
             })
         )
 
-        return { instanceCount, configurations: configurationsData, agentIndex, agentName }
+        return { instanceCount, configurations: configurationsData, agentIndex, agentName, agentConfig }
         }
     } catch (error: any) {
         console.log(`Error fetching agent configuration: ${error}`)
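
For callers, the only visible change is the extra agentConfig field on the resolved object. A minimal consumer sketch (hypothetical usage, not from this commit):

// Hypothetical consumer of the extended return value.
const result = await fetchAgentConfiguration(agentId)
if (result) {
    console.log(`Agent ${result.agentName} system prompt:`, result.agentConfig?.system_prompt ?? '(none)')
}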

agent-node/.env.example

Lines changed: 2 additions & 1 deletion
@@ -1,2 +1,3 @@
 TOKEN=
-WS_URL=ws://localhost:3001
+WS_URL=ws://localhost:3001
+GEMINI_API_KEY=

agent-node/package.json

Lines changed: 2 additions & 1 deletion
@@ -18,6 +18,7 @@
     "description": "",
     "dependencies": {
         "@emurgo/cardano-serialization-lib-asmjs": "^11.5.0",
+        "@google/genai": "^1.13.0",
         "@types/ws": "^8.5.10",
         "axios": "^1.6.8",
         "bech32": "^2.0.0",
@@ -29,8 +30,8 @@
         "ws": "^8.18.0"
     },
     "devDependencies": {
-        "@types/luxon": "^3.4.2",
         "@eslint/js": "^9.4.0",
+        "@types/luxon": "^3.4.2",
         "@types/node-cron": "^3.0.11",
         "@types/websocket": "^1.0.10",
         "eslint": "8",

agent-node/src/constants/global.ts

Lines changed: 7 additions & 0 deletions
@@ -3,9 +3,16 @@ import { IEventBasedAction } from '../types/eventTriger'
 export const globalState: {
     eventTriggerTypeDetails: IEventBasedAction[]
     agentName: string
+    systemPrompt: string
+    functionLLMSettings: Record<
+        string,
+        { enabled: boolean; userPrefText: string; prefs?: any }
+    >
 } = {
     eventTriggerTypeDetails: [],
     agentName: '',
+    systemPrompt: '',
+    functionLLMSettings: {},
 }
 
 export const globalRootKeyBuffer: { value: Buffer | null } = {
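
functionLLMSettings is keyed by function name. After the manager's configuration is applied (see index.ts below), the state would look roughly like this (the transferAda name and policy text are illustrative):

// Illustrative populated state; 'transferAda' is a hypothetical function name.
globalState.functionLLMSettings = {
    transferAda: {
        enabled: true,
        userPrefText: 'Only allow transfers under 100 ADA.',
    },
}
globalState.systemPrompt = 'You are a Cardano autonomous agent'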

agent-node/src/executor/AgentRunner.ts

Lines changed: 1 addition & 1 deletion
@@ -18,7 +18,7 @@ export class AgentRunner {
         this.executor = new Executor(null, managerInterface, txListener)
     }
 
-    invokeFunction(triggerType: TriggerType, instanceIndex: number, method: string, ...args: any) {
+    async invokeFunction(triggerType: TriggerType, instanceIndex: number, method: string, ...args: any) {
         this.executor.invokeFunction(method, ...args).then((result) => {
             saveTxLog(result, this.managerInterface, triggerType, instanceIndex)
         })
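
Note that the async keyword only changes the declared return type to Promise<void>; the body still fires executor.invokeFunction and logs via .then() without awaiting it. The new signature matters because the gating wrapper below returns this.core.invokeFunction(...) and expects a Promise-compatible result.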
agent-node/src/executor/LlmGatedRunner.ts

Lines changed: 89 additions & 0 deletions (new file)

@@ -0,0 +1,89 @@
+import { TriggerType } from '../service/triggerService'
+import { saveTxLog } from '../utils/agent'
+import { globalState } from '../constants/global'
+import { LLMService } from '../service/LLMService'
+import { EventContext } from './BaseFunction'
+import { AgentRunner } from './AgentRunner'
+
+export class LLMGatedRunner {
+    constructor(private readonly core: AgentRunner) {}
+
+    async invokeFunction(triggerType: TriggerType, instanceIndex: number, method: string, ...args: any) {
+        const extractedArgs = this.extractArgumentValues(args)
+        const shouldGate = this.shouldUseLLMForFunction(method) && this.isCron(triggerType)
+        if (shouldGate) {
+            try {
+                const llm = new LLMService()
+                const decision = await llm.shouldExecuteFunction(
+                    method,
+                    extractedArgs,
+                    {},
+                    this.getUserPreferenceText(method),
+                    this.getSystemPrompt()
+                )
+                if (!decision.should_execute) {
+                    const blocked = [
+                        {
+                            function: method,
+                            arguments: args,
+                            return: {
+                                operation: method,
+                                executed: false,
+                                blocked_by_llm: true,
+                                llm_reasoning: decision.reasoning,
+                                llm_confidence: decision.confidence,
+                                message: `LLM blocked: ${decision.reasoning}`,
+                                timestamp: new Date().toISOString(),
+                            },
+                        },
+                    ]
+                    saveTxLog(blocked, (this.core as any).managerInterface, triggerType, instanceIndex)
+                    return
+                }
+            } catch (e) {
+                console.error(`LLM gating failed, continuing: ${e}`)
+            }
+        }
+        return this.core.invokeFunction(triggerType, instanceIndex, method, ...args)
+    }
+
+    async invokeFunctionWithEventContext(
+        eventFilterContext: any,
+        context: EventContext,
+        triggerType: TriggerType,
+        instanceIndex: number,
+        method: string,
+        parameters: any[]
+    ) {
+        return this.core.invokeFunctionWithEventContext(
+            eventFilterContext,
+            context,
+            triggerType,
+            instanceIndex,
+            method,
+            parameters
+        )
+    }
+
+    async remakeContext(index: number) {
+        return this.core.remakeContext(index)
+    }
+
+    // helpers
+    private isCron(triggerType: TriggerType): boolean {
+        return String(triggerType) === 'CRON'
+    }
+    private shouldUseLLMForFunction(method: string): boolean {
+        const fnCfg = globalState.functionLLMSettings?.[method]
+        return !!(fnCfg && fnCfg.enabled)
+    }
+    private getUserPreferenceText(method: string): string {
+        return globalState.functionLLMSettings?.[method]?.userPrefText || ''
+    }
+    private getSystemPrompt(): string {
+        return (globalState.systemPrompt ?? '').toString()
+    }
+    private extractArgumentValues(args: any[]) {
+        return args.map((a) => (a && typeof a === 'object' && 'value' in a ? a.value : a))
+    }
+}
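
LLMGatedRunner is a plain decorator over AgentRunner: it mirrors the public methods, intercepts only cron-triggered calls whose function has gating enabled, and delegates everything else untouched. A minimal wiring sketch, assuming an existing managerInterface and txListener (the transferAda settings entry is illustrative):

// Sketch only: wraps the core runner and gates one hypothetical function.
globalState.systemPrompt = 'You are a Cardano autonomous agent'
globalState.functionLLMSettings['transferAda'] = {
    enabled: true,
    userPrefText: 'Never transfer more than 50 ADA.',
}

const runner = new LLMGatedRunner(new AgentRunner(managerInterface, txListener))
// Cron-triggered and gated: the LLM is consulted before execution.
await runner.invokeFunction('CRON' as TriggerType, 0, 'transferAda', { value: 50 })

Note the failure mode: if the LLMService call itself throws, the runner logs the error and falls through to normal execution, so a gating outage fails open.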

agent-node/src/index.ts

Lines changed: 47 additions & 2 deletions
@@ -14,6 +14,7 @@ import { AgentRunner } from './executor/AgentRunner'
 import { decodeBase64string } from './utils/base64converter'
 import { validateToken } from './utils/validator'
 import { getHandlers } from './executor/AgentFunctions'
+import { LLMGatedRunner } from './executor/LlmGatedRunner'
 
 configDotenv()
 let wsUrl: string = process.env.WS_URL as string
@@ -62,7 +63,7 @@ function connectToManagerWebSocket() {
     const managerInterface = new ManagerInterface(rpcChannel)
     const txListener = new TxListener()
 
-    const agentRunners: Array<AgentRunner> = []
+    const agentRunners: Array<any> = []
 
     rpcChannel.on('methodCall', (method, args) => {
         agentRunners.forEach((runner, index) => {
@@ -71,14 +72,58 @@ function connectToManagerWebSocket() {
     })
 
     const topicHandler = new RpcTopicHandler(managerInterface, txListener)
+
+    // Extracts per-function LLM settings from the configurations payload
+    function applyFnSettingsFromConfigurations(message: any) {
+        if (!message?.configurations) return
+        globalState.functionLLMSettings = {}
+        message.configurations.forEach((cfg: any) => {
+            const act = cfg?.action || {}
+            if (act.function_name) {
+                globalState.functionLLMSettings[act.function_name] = {
+                    enabled: !!act.llm_enabled,
+                    userPrefText: act.llm_user_preferences_text || '',
+                    prefs: act.llm_preferences || undefined,
+                }
+            }
+        })
+        console.log('[INIT] LLM settings for:', Object.keys(globalState.functionLLMSettings))
+    }
+
+    // Loads the system prompt from the agent config
+    function applySystemPromptFromMessage(message: any, logCtx: string) {
+        const prompt = message?.agentConfig?.system_prompt ?? message?.config?.system_prompt
+
+        if (typeof prompt === 'string' && prompt.length && prompt !== globalState.systemPrompt) {
+            globalState.systemPrompt = prompt
+        }
+    }
+
     rpcChannel.on('event', (topic, message) => {
+        // initial payload containing configs
+        if (topic === 'initial_config') {
+            applySystemPromptFromMessage(message, 'initial_config')
+            applyFnSettingsFromConfigurations(message)
+            return
+        }
+
+        // config updates from the manager
+        if (topic === 'config_updated') {
+            applyFnSettingsFromConfigurations(message)
+            applySystemPromptFromMessage(message, 'config_updated')
+        }
+
         if (topic == 'instance_count') {
+            applySystemPromptFromMessage(message, 'instance_count')
+            applyFnSettingsFromConfigurations(message)
+
             globalRootKeyBuffer.value = message.rootKeyBuffer
             globalState.agentName = message.agentName
             Array(message.instanceCount)
                 .fill('')
                 .forEach(async (item, index) => {
-                    const runner = new AgentRunner(managerInterface, txListener)
+                    const coreRunner = new AgentRunner(managerInterface, txListener)
+                    const runner = new LLMGatedRunner(coreRunner)
                     await runner.remakeContext(index)
                     agentRunners.push(runner)
                 })
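
Working backwards from the two extractors, a manager payload on initial_config or config_updated presumably carries this shape (field values illustrative):

// Hypothetical event payload, inferred from the extractors above.
const message = {
    agentConfig: { system_prompt: 'You are a Cardano autonomous agent' },
    configurations: [
        {
            action: {
                function_name: 'transferAda',
                llm_enabled: true,
                llm_user_preferences_text: 'Block transfers above 100 ADA.',
            },
        },
    ],
}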
agent-node/src/service/LLMService.ts

Lines changed: 127 additions & 0 deletions (new file)

@@ -0,0 +1,127 @@
+import 'dotenv/config'
+import { GoogleGenAI } from '@google/genai'
+
+const ai = new GoogleGenAI({})
+
+export class LLMService {
+    private apiKey: string
+
+    constructor() {
+        this.apiKey = process.env.GEMINI_API_KEY || ''
+        if (!this.apiKey) {
+            console.warn('No Gemini API key')
+        }
+    }
+
+    async shouldExecuteFunction(
+        functionName: string,
+        functionArgs: any[],
+        structuredPreferences: any,
+        userPreferenceText: any,
+        systemPrompt: string
+    ): Promise<{
+        should_execute: boolean
+        confidence: number
+        reasoning: string
+    }> {
+        if (!this.apiKey) {
+            console.log('LLM not configured')
+            return {
+                should_execute: true,
+                confidence: 0.5,
+                reasoning: 'LLM not configured, default allow',
+            }
+        }
+
+        try {
+            const prompt = this.buildPrompt(
+                functionName,
+                functionArgs,
+                structuredPreferences,
+                userPreferenceText,
+                systemPrompt
+            )
+
+            console.log('Asking LLM...')
+
+            const response: any = await ai.models.generateContent({
+                model: 'gemini-2.5-flash',
+                contents: prompt,
+            })
+
+            console.log('Response from Gemini:', response.text)
+
+            const decision = this.extractJson(response.text)
+            console.log('After parsing:', decision)
+
+            return {
+                should_execute: decision.should_execute,
+                confidence: decision.confidence,
+                reasoning: decision.reasoning,
+            }
+        } catch (error: any) {
+            console.error('LLM failed, error:', error)
+            return {
+                should_execute: true,
+                confidence: 0.6,
+                reasoning: `LLM service failed: ${error.message}`,
+            }
+        }
+    }
+
+    private buildPrompt(
+        functionName: string,
+        functionArgs: any[],
+        structuredPreferences: any,
+        userPreferenceText: any,
+        systemPrompt: string
+    ): string {
+        const baseSystemP = systemPrompt || 'You are a Cardano autonomous agent'
+        console.log('System prompt check:', systemPrompt)
+
+        const context =
+            structuredPreferences && Object.keys(structuredPreferences).length
+                ? `\nContext:\n${JSON.stringify(structuredPreferences, null, 2)} `
+                : ''
+
+        const userPolicy = userPreferenceText
+            ? `\nUser Policy:\n${userPreferenceText}`
+            : ''
+
+        console.log('User policy:', userPolicy)
+        console.log('Context:', context)
+
+        return `
+${baseSystemP}
+
+FUNCTION TO EXECUTE: ${functionName}
+Args: ${JSON.stringify(functionArgs)}${context}${userPolicy}
+
+Analyze this call strictly against "User Policy" and System prompt.
+Return ONLY JSON:
+{"should_execute": true/false, "confidence": 0.0-1.0, "reasoning": "brief"}
+`
+    }
+
+    extractJson(text: string): any {
+        const cleaned = text.replace(/```json|```/g, '').trim()
+
+        const start = cleaned.indexOf('{')
+        const end = cleaned.lastIndexOf('}')
+
+        if (start !== -1 && end !== -1) {
+            const jsonString = cleaned.substring(start, end + 1)
+            try {
+                return JSON.parse(jsonString)
+            } catch (e) {
+                console.error('JSON parse error:', e, jsonString)
+            }
+        }
+
+        return {
+            should_execute: false,
+            confidence: 0.0,
+            reasoning: 'Failed to parse LLM response',
+        }
+    }
+}
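
A direct-call sketch (requires GEMINI_API_KEY in the environment; the function name and policy text are illustrative):

// Inside an async function; 'transferAda' and the policy are hypothetical.
const llm = new LLMService()
const decision = await llm.shouldExecuteFunction(
    'transferAda',
    [{ amount: 150 }],
    {}, // no structured preferences
    'Never transfer more than 100 ADA.',
    'You are a Cardano autonomous agent'
)
// e.g. { should_execute: false, confidence: 0.9, reasoning: 'Amount exceeds the 100 ADA limit' }

Note the asymmetric defaults: a transport or API failure fails open (should_execute: true), while an unparseable model response fails closed via extractJson's fallback (should_execute: false).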
