 import cbws from '../core/websocket';
 import { LLMResponse } from '@codebolt/types';
+
+/**
+ * Represents a message in the conversation with roles and content.
+ */
+export interface Message {
+    /** The role of the message sender: user, assistant, tool, or system */
+    role: 'user' | 'assistant' | 'tool' | 'system';
+    /** The content of the message; can be an array of content blocks or a string */
+    content: any[] | string;
+    /** Optional ID for tool calls */
+    tool_call_id?: string;
+    /** Optional tool calls for assistant messages */
+    tool_calls?: ToolCall[];
+    /** Additional properties that might be present */
+    [key: string]: any;
+}
+
+/**
+ * Represents a tool call in OpenAI format.
+ */
+export interface ToolCall {
+    /** Unique identifier for this tool call */
+    id: string;
+    /** The type of tool call */
+    type: 'function';
+    /** Function call details */
+    function: {
+        /** Name of the function to call */
+        name: string;
+        /** Arguments for the function call as a JSON string */
+        arguments: string;
+    };
+}
+
+/**
+ * Represents a tool definition in OpenAI format.
+ */
+export interface Tool {
+    /** The type of tool */
+    type: 'function';
+    /** Function definition */
+    function: {
+        /** Name of the function */
+        name: string;
+        /** Description of what the function does */
+        description?: string;
+        /** JSON schema for the function parameters */
+        parameters?: any;
+    };
+}
+
+/**
+ * LLM inference request parameters.
+ */
+export interface LLMInferenceParams {
+    /** Array of messages in the conversation */
+    messages: Message[];
+    /** Available tools for the model to use */
+    tools?: Tool[];
+    /** How the model should use tools */
+    tool_choice?: 'auto' | 'none' | 'required' | { type: 'function'; function: { name: string } };
+    /** The LLM role that determines which model to use */
+    llmrole: string;
+    /** Maximum number of tokens to generate */
+    max_tokens?: number;
+    /** Temperature for response generation */
+    temperature?: number;
+    /** Whether to stream the response */
+    stream?: boolean;
+}
+
 /**
  * A module for interacting with large language models (LLMs) via WebSocket.
  */
 const cbllm = {
     /**
-     * Sends an inference request to the LLM and returns the model's response.
+     * Sends an inference request to the LLM using the OpenAI message format with tool support.
      * The model is selected based on the provided `llmrole`. If the specific model
      * for the role is not found, it falls back to the default model for the current agent,
      * and ultimately to the default application-wide LLM if necessary.
      *
-     * @param {string} message - The input message or prompt to be sent to the LLM.
-     * @param {string} llmrole - The role of the LLM to determine which model to use.
+     * @param {LLMInferenceParams} message - The inference parameters, including messages, tools, and options.
      * @returns {Promise<LLMResponse>} A promise that resolves with the LLM's response.
      */
-    inference: async (message: string, llmrole: string): Promise<LLMResponse> => {
+    inference: async (message: LLMInferenceParams): Promise<LLMResponse> => {
         return cbws.messageManager.sendAndWaitForResponse(
             {
                 "type": "inference",
                 "message": {
-                    prompt: message,
-                    llmrole
+                    messages: message.messages,
+                    tools: message.tools,
+                    tool_choice: message.tool_choice,
+                    llmrole: message.llmrole,
+                    // Forward the optional generation settings declared on LLMInferenceParams.
+                    max_tokens: message.max_tokens,
+                    temperature: message.temperature,
+                    stream: message.stream
                 },
             },
             "llmResponse"
         );
+    },
+
+    /**
+     * Legacy method for backward compatibility; converts a simple string prompt to the message format.
+     * @deprecated Use the new inference method with the proper message format instead.
+     *
+     * @param {string} message - The input message or prompt to be sent to the LLM.
+     * @param {string} llmrole - The role of the LLM to determine which model to use.
+     * @returns {Promise<LLMResponse>} A promise that resolves with the LLM's response.
+     */
+    legacyInference: async (message: string, llmrole: string): Promise<LLMResponse> => {
+        const messages: Message[] = [
+            {
+                role: 'user',
+                content: message
+            }
+        ];
+
+        return cbllm.inference({
+            messages,
+            llmrole
+        });
     }
 };

-export default cbllm;
+export default cbllm;
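
// ---------------------------------------------------------------------------
// Usage sketch (illustrative, not part of the commit above): how a caller
// might invoke the new inference API with an OpenAI-style tool definition.
// The module path './llm', the get_weather tool and its schema, and the
// 'default' llmrole value are all assumptions made for this example.
import cbllm, { LLMInferenceParams, Tool } from './llm';

const weatherTool: Tool = {
    type: 'function',
    function: {
        name: 'get_weather',
        description: 'Look up the current weather for a city',
        parameters: {
            type: 'object',
            properties: {
                city: { type: 'string', description: 'City name' }
            },
            required: ['city']
        }
    }
};

const params: LLMInferenceParams = {
    messages: [
        { role: 'system', content: 'You are a helpful assistant.' },
        { role: 'user', content: 'What is the weather in Paris?' }
    ],
    tools: [weatherTool],
    tool_choice: 'auto', // let the model decide whether to call the tool
    llmrole: 'default'
};

async function demo() {
    // Resolves with the selected model's LLMResponse.
    const response = await cbllm.inference(params);
    return response;
}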
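
// Round-trip sketch (also illustrative): if the model's reply contains
// tool_calls, the caller executes the function and sends the result back as a
// 'tool' role message, echoing tool_call_id so the model can match the result
// to its request. runTool is a hypothetical helper standing in for real tool
// execution, and 'default' is again an assumed llmrole value.
import cbllm, { Message, ToolCall } from './llm';

// Hypothetical stand-in for whatever executes the requested tool locally.
declare function runTool(name: string, args: unknown): Promise<unknown>;

async function continueAfterToolCall(history: Message[], call: ToolCall) {
    // Tool arguments arrive as a JSON string in the OpenAI format.
    const args = JSON.parse(call.function.arguments);
    const result = await runTool(call.function.name, args);

    const followUp: Message[] = [
        ...history,
        {
            role: 'tool',
            tool_call_id: call.id, // ties the result back to the originating call
            content: JSON.stringify(result)
        }
    ];

    return cbllm.inference({ messages: followUp, llmrole: 'default' });
}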