1 change: 1 addition & 0 deletions agent-manager/prisma/schema.prisma
@@ -26,6 +26,7 @@ model Agent {
triggers Trigger[] // One-to-many relationship with Trigger
template Template? @relation(fields: [template_id], references: [id], onDelete: SetNull, onUpdate: Cascade)
secret_key Bytes @db.ByteA
+ config Json?
}

model Trigger {
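The new nullable `config` column carries free-form agent settings. A minimal sketch of the shape the rest of this PR assumes it holds, inferred from `applySystemPromptFromMessage` in `agent-node/src/index.ts` below, which reads `agentConfig.system_prompt`; the example value is invented:

// Hypothetical value for Agent.config, as consumed by the agent node below.
// system_prompt is the only field this PR actually reads.
const exampleAgentConfig = {
    system_prompt: 'You are a cautious Cardano agent; refuse risky transfers.',
}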
5 changes: 4 additions & 1 deletion agent-manager/src/repository/agent_manager_repository.ts
@@ -24,6 +24,7 @@ export async function fetchAgentConfiguration(agentId: string): Promise<
configurations: any[]
agentIndex: number
agentName: string
+ agentConfig: any
}
| undefined
> {
@@ -46,6 +47,8 @@
const instanceCount = Number(agentInstance.instance)
const agentIndex = Number(agentInstance.index)
const agentName = agentInstance.name
+ const agentConfig = (agentInstance as any).config || null
+
const configurationsData = agentConfigurations.map(
(config: { id: string; type: string; data: JsonValue; action: JsonValue }) => ({
id: config.id,
@@ -55,7 +58,7 @@
})
)

- return { instanceCount, configurations: configurationsData, agentIndex, agentName }
+ return { instanceCount, configurations: configurationsData, agentIndex, agentName, agentConfig }
}
} catch (error: any) {
console.log(`Error fetching agent configuration: ${error}`)
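For orientation, a sketch of how the widened return value might be consumed; `buildInstanceCountPayload` is a hypothetical caller, not part of this diff:

// Hypothetical consumer: agentConfig now rides along with the existing fields.
async function buildInstanceCountPayload(agentId: string) {
    const result = await fetchAgentConfiguration(agentId)
    if (!result) return undefined
    const { instanceCount, configurations, agentIndex, agentName, agentConfig } = result
    // agentConfig is the raw Json column value (or null) and is forwarded as-is
    return { instanceCount, configurations, agentIndex, agentName, agentConfig }
}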
3 changes: 2 additions & 1 deletion agent-node/.env.example
@@ -1,2 +1,3 @@
TOKEN=
- WS_URL=ws://localhost:3001
+ WS_URL=ws://localhost:3001
+ GEMINI_API_KEY=
3 changes: 2 additions & 1 deletion agent-node/package.json
@@ -18,6 +18,7 @@
"description": "",
"dependencies": {
"@emurgo/cardano-serialization-lib-asmjs": "^11.5.0",
"@google/genai": "^1.13.0",
"@types/ws": "^8.5.10",
"axios": "^1.6.8",
"bech32": "^2.0.0",
@@ -29,8 +30,8 @@
"ws": "^8.18.0"
},
"devDependencies": {
"@types/luxon": "^3.4.2",
"@eslint/js": "^9.4.0",
"@types/luxon": "^3.4.2",
"@types/node-cron": "^3.0.11",
"@types/websocket": "^1.0.10",
"eslint": "8",
7 changes: 7 additions & 0 deletions agent-node/src/constants/global.ts
@@ -3,9 +3,16 @@ import { IEventBasedAction } from '../types/eventTriger'
export const globalState: {
    eventTriggerTypeDetails: IEventBasedAction[]
    agentName: string
+     systemPrompt: string
+     functionLLMSettings: Record<
+         string,
+         { enabled: boolean; userPrefText: string; prefs?: any }
+     >
} = {
    eventTriggerTypeDetails: [],
    agentName: '',
+     systemPrompt: '',
+     functionLLMSettings: {}
}

export const globalRootKeyBuffer: { value: Buffer | null } = {
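For illustration, after `applyFnSettingsFromConfigurations` (added in `agent-node/src/index.ts` below) has processed a configuration payload, the new fields might look like this; the function name and policy text are invented examples:

// Hypothetical populated state (example values only)
globalState.systemPrompt = 'You are a Cardano autonomous agent'
globalState.functionLLMSettings = {
    transferAda: {
        enabled: true,
        userPrefText: 'Never send more than 50 ADA in a single transaction.',
        prefs: undefined,
    },
}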
2 changes: 1 addition & 1 deletion agent-node/src/executor/AgentRunner.ts
@@ -18,7 +18,7 @@ export class AgentRunner {
        this.executor = new Executor(null, managerInterface, txListener)
    }

-     invokeFunction(triggerType: TriggerType, instanceIndex: number, method: string, ...args: any) {
+     async invokeFunction(triggerType: TriggerType, instanceIndex: number, method: string, ...args: any) {
        this.executor.invokeFunction(method, ...args).then((result) => {
            saveTxLog(result, this.managerInterface, triggerType, instanceIndex)
        })
89 changes: 89 additions & 0 deletions agent-node/src/executor/LLMGatedRunner.ts
@@ -0,0 +1,89 @@
import { TriggerType } from '../service/triggerService'
import { saveTxLog } from '../utils/agent'
import { globalState } from '../constants/global'
import { LLMService } from '../service/LLMService'
import { EventContext } from './BaseFunction'
import { AgentRunner } from './AgentRunner'

export class LLMGatedRunner {
    constructor(private readonly core: AgentRunner) {}

    async invokeFunction(triggerType: TriggerType, instanceIndex: number, method: string, ...args: any) {
        const extractedArgs = this.extractArgumentValues(args)
        const shouldGate = this.shouldUseLLMForFunction(method) && this.isCron(triggerType)
        if (shouldGate) {
            try {
                const llm = new LLMService()
                const decision = await llm.shouldExecuteFunction(
                    method,
                    extractedArgs,
                    {},
                    this.getUserPreferenceText(method),
                    this.getSystemPrompt()
                )
                if (!decision.should_execute) {
                    const blocked = [
                        {
                            function: method,
                            arguments: args,
                            return: {
                                operation: method,
                                executed: false,
                                blocked_by_llm: true,
                                llm_reasoning: decision.reasoning,
                                llm_confidence: decision.confidence,
                                message: `LLM blocked: ${decision.reasoning}`,
                                timestamp: new Date().toISOString(),
                            },
                        },
                    ]
                    saveTxLog(blocked, (this.core as any).managerInterface, triggerType, instanceIndex)
                    return
                }
            } catch (e) {
                console.error(`LLM gating failed, continuing: ${e}`)
            }
        }
        return this.core.invokeFunction(triggerType, instanceIndex, method, ...args)
    }

    async invokeFunctionWithEventContext(
        eventFilterContext: any,
        context: EventContext,
        triggerType: TriggerType,
        instanceIndex: number,
        method: string,
        parameters: any[]
    ) {
        return this.core.invokeFunctionWithEventContext(
            eventFilterContext,
            context,
            triggerType,
            instanceIndex,
            method,
            parameters
        )
    }

    async remakeContext(index: number) {
        return this.core.remakeContext(index)
    }

    // helpers
    private isCron(triggerType: TriggerType): boolean {
        return String(triggerType) === 'CRON'
    }

    private shouldUseLLMForFunction(method: string): boolean {
        const fnCfg = globalState.functionLLMSettings?.[method]
        return !!(fnCfg && fnCfg.enabled)
    }

    private getUserPreferenceText(method: string): string {
        return globalState.functionLLMSettings?.[method]?.userPrefText || ''
    }

    private getSystemPrompt(): string {
        return (globalState.systemPrompt ?? '').toString()
    }

    private extractArgumentValues(args: any[]) {
        return args.map((a) => (a && typeof a === 'object' && 'value' in a ? a.value : a))
    }
}
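Usage mirrors the wiring added in `agent-node/src/index.ts` below: the gated runner wraps a core `AgentRunner` and exposes the same method surface, so existing call sites need no changes. A minimal sketch of the decorator pattern in use, with the function name invented:

// Gate only what needs gating, delegate the rest to the core runner.
const coreRunner = new AgentRunner(managerInterface, txListener)
const runner = new LLMGatedRunner(coreRunner)

declare const triggerType: TriggerType // e.g. the CRON trigger
// CRON-triggered calls to LLM-enabled functions go through the Gemini check;
// everything else passes straight through. Arguments shaped like { value: ... }
// are unwrapped by extractArgumentValues before being shown to the LLM.
await runner.invokeFunction(triggerType, 0, 'transferAda', { value: 25 })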
49 changes: 47 additions & 2 deletions agent-node/src/index.ts
@@ -14,6 +14,7 @@ import { AgentRunner } from './executor/AgentRunner'
import { decodeBase64string } from './utils/base64converter'
import { validateToken } from './utils/validator'
import { getHandlers } from './executor/AgentFunctions'
+ import { LLMGatedRunner } from './executor/LLMGatedRunner'

configDotenv()
let wsUrl: string = process.env.WS_URL as string
@@ -62,7 +63,7 @@ function connectToManagerWebSocket() {
    const managerInterface = new ManagerInterface(rpcChannel)
    const txListener = new TxListener()

-     const agentRunners: Array<AgentRunner> = []
+     const agentRunners: Array<any> = []

    rpcChannel.on('methodCall', (method, args) => {
        agentRunners.forEach((runner, index) => {
@@ -71,14 +72,58 @@
    })

    const topicHandler = new RpcTopicHandler(managerInterface, txListener)

+     // LLM settings extractor from configurations
+     function applyFnSettingsFromConfigurations(message: any) {
+         if (!message?.configurations) return
+         globalState.functionLLMSettings = {}
+         message.configurations.forEach((cfg: any) => {
+             const act = cfg?.action || {}
+             if (act.function_name) {
+                 globalState.functionLLMSettings[act.function_name] = {
+                     enabled: !!act.llm_enabled,
+                     userPrefText: act.llm_user_preferences_text || '',
+                     prefs: act.llm_preferences || undefined,
+                 }
+             }
+         })
+         console.log('[INIT] LLM settings for:', Object.keys(globalState.functionLLMSettings))
+     }

+     // loads the system prompt
+     function applySystemPromptFromMessage(message: any, logCtx: string) {
+         const prompt = message?.agentConfig?.system_prompt ?? message?.config?.system_prompt
+
+         if (typeof prompt === 'string' && prompt.length && prompt !== globalState.systemPrompt) {
+             globalState.systemPrompt = prompt
+         }
+     }

+     rpcChannel.on('event', (topic, message) => {
+         // initial payload containing configs
+         if (topic === 'initial_config') {
+             applySystemPromptFromMessage(message, 'initial_config')
+             applyFnSettingsFromConfigurations(message)
+             return
+         }
+
+         // config updates from manager
+         if (topic === 'config_updated') {
+             applyFnSettingsFromConfigurations(message)
+             applySystemPromptFromMessage(message, 'config_updated')
+         }

        if (topic == 'instance_count') {
+             applySystemPromptFromMessage(message, 'instance_count')
+             applyFnSettingsFromConfigurations(message)
+
            globalRootKeyBuffer.value = message.rootKeyBuffer
            globalState.agentName = message.agentName
            Array(message.instanceCount)
                .fill('')
                .forEach(async (item, index) => {
-                     const runner = new AgentRunner(managerInterface, txListener)
+                     const coreRunner = new AgentRunner(managerInterface, txListener)
+                     const runner = new LLMGatedRunner(coreRunner)
                    await runner.remakeContext(index)
                    agentRunners.push(runner)
                })
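For reference, a hypothetical message that `applyFnSettingsFromConfigurations` and `applySystemPromptFromMessage` would accept, inferred from the fields they read; all concrete values are invented:

const exampleMessage = {
    agentConfig: { system_prompt: 'You are a cautious Cardano agent.' },
    configurations: [
        {
            action: {
                function_name: 'transferAda',
                llm_enabled: true,
                llm_user_preferences_text: 'Block transfers over 100 ADA.',
            },
        },
    ],
}
// On a 'config_updated' event this payload would repopulate
// globalState.functionLLMSettings and globalState.systemPrompt.
applyFnSettingsFromConfigurations(exampleMessage)
applySystemPromptFromMessage(exampleMessage, 'config_updated')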
127 changes: 127 additions & 0 deletions agent-node/src/service/LLMService.ts
@@ -0,0 +1,127 @@
import 'dotenv/config'
import { GoogleGenAI } from '@google/genai'

const ai = new GoogleGenAI({})

export class LLMService {
    private apiKey: string

    constructor() {
        this.apiKey = process.env.GEMINI_API_KEY || ''
        if (!this.apiKey) {
            console.warn('No Gemini API key')
        }
    }

    async shouldExecuteFunction(
        functionName: string,
        functionArgs: any[],
        structuredPreferences: any,
        userPreferenceText: any,
        systemPrompt: string
    ): Promise<{
        should_execute: boolean
        confidence: number
        reasoning: string
    }> {
        if (!this.apiKey) {
            console.log('LLM not configured')
            return {
                should_execute: true,
                confidence: 0.5,
                reasoning: 'LLM not configured, default allow',
            }
        }

        try {
            const prompt = this.buildPrompt(
                functionName,
                functionArgs,
                structuredPreferences,
                userPreferenceText,
                systemPrompt
            )

            console.log('Asking LLM...')

            const response: any = await ai.models.generateContent({
                model: 'gemini-2.5-flash',
                contents: prompt,
            })

            console.log('Response from Gemini:', response.text)

            const decision = this.extractJson(response.text)
            console.log('After parsing:', decision)

            return {
                should_execute: decision.should_execute,
                confidence: decision.confidence,
                reasoning: decision.reasoning,
            }
        } catch (error: any) {
            console.error('LLM failed, error:', error)
            return {
                should_execute: true,
                confidence: 0.6,
                reasoning: `LLM service failed: ${error.message}`,
            }
        }
    }

    private buildPrompt(
        functionName: string,
        functionArgs: any[],
        structuredPreferences: any,
        userPreferenceText: any,
        systemPrompt: string
    ): string {
        const baseSystemP = systemPrompt || 'You are a Cardano autonomous agent'
        console.log('System prompt check:', systemPrompt)

        const context =
            structuredPreferences && Object.keys(structuredPreferences).length
                ? `\nContext:\n${JSON.stringify(structuredPreferences, null, 2)} `
                : ''

        const userPolicy = userPreferenceText
            ? `\nUser Policy:\n${userPreferenceText}`
            : ''

        console.log('User policy:', userPolicy)
        console.log('Context:', context)

        return `
${baseSystemP}

FUNCTION TO EXECUTE: ${functionName}
Args: ${JSON.stringify(functionArgs)}${context}${userPolicy}

Analyze this call strictly against "User Policy" and System prompt.
Return ONLY JSON:
{"should_execute": true/false, "confidence": 0.0-1.0, "reasoning": "brief"}
`
    }

    extractJson(text: string): any {
        const cleaned = text.replace(/```json|```/g, '').trim()

        const start = cleaned.indexOf('{')
        const end = cleaned.lastIndexOf('}')

        if (start !== -1 && end !== -1) {
            const jsonString = cleaned.substring(start, end + 1)
            try {
                return JSON.parse(jsonString)
            } catch (e) {
                console.error('JSON parse error:', e, jsonString)
            }
        }

        return {
            should_execute: false,
            confidence: 0.0,
            reasoning: 'Failed to parse LLM response',
        }
    }
}
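A quick sketch of how `extractJson` copes with a typical fenced Gemini reply; the input string is made up:

// Gemini often wraps its JSON in a ```json fence; extractJson strips the fence,
// isolates the outermost braces, and falls back to a "do not execute" decision
// if parsing fails.
const svc = new LLMService()
const reply = '```json\n{"should_execute": false, "confidence": 0.9, "reasoning": "over policy limit"}\n```'
const decision = svc.extractJson(reply)
// decision.should_execute === false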