Skip to content

Commit 7b2ce9b

Browse files
changes
1 parent 05269d8 commit 7b2ce9b

File tree

4 files changed

+111
-78
lines changed

4 files changed

+111
-78
lines changed

package.json

Lines changed: 5 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -32,17 +32,22 @@
3232
"@codebolt/types": "^1.0.10",
3333
"@modelcontextprotocol/sdk": "^1.4.1",
3434
"@types/pdf-parse": "^1.1.5",
35+
"buffer": "^6.0.3",
3536
"execa": "^9.5.2",
3637
"file-type": "^19.6.0",
3738
"fuse.js": "^7.0.0",
3839
"js-yaml": "^4.1.0",
3940
"load-esm": "^1.0.1",
4041
"mcp-proxy": "^2.4.0",
42+
"os-browserify": "^0.3.0",
43+
"path-browserify": "^1.0.1",
4144
"pdf-parse": "^1.1.1",
45+
"stream-browserify": "^3.0.0",
4246
"strict-event-emitter-types": "^2.0.0",
4347
"timers": "^0.1.1",
4448
"undici": "^7.4.0",
4549
"uri-templates": "^0.2.0",
50+
"util": "^0.12.5",
4651
"web-tree-sitter": "^0.24.1",
4752
"ws": "^8.17.0",
4853
"yargs": "^17.7.2",

src/index.ts

Lines changed: 70 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -25,9 +25,77 @@ import {chatSummary} from './modules/history'
2525
import codeboltTools from './modules/tools';
2626
import cbagent from './modules/agent';
2727
import cbutils from './modules/utils';
28+
import type { LLMResponse } from '@codebolt/types';
2829

29-
// Export types for clean imports
30-
export type { Message, ToolCall, Tool, LLMInferenceParams } from './modules/llm';
30+
/**
31+
* Represents a message in the conversation with roles and content.
32+
*/
33+
export interface Message {
34+
/** The role of the message sender: user, assistant, tool, or system */
35+
role: 'user' | 'assistant' | 'tool' | 'system';
36+
/** The content of the message, can be an array of content blocks or a string */
37+
content: any[] | string;
38+
/** Optional ID for tool calls */
39+
tool_call_id?: string;
40+
/** Optional tool calls for assistant messages */
41+
tool_calls?: ToolCall[];
42+
/** Additional properties that might be present */
43+
[key: string]: any;
44+
}
45+
46+
/**
47+
* Represents a tool call in OpenAI format
48+
*/
49+
export interface ToolCall {
50+
/** Unique identifier for this tool call */
51+
id: string;
52+
/** The type of tool call */
53+
type: 'function';
54+
/** Function call details */
55+
function: {
56+
/** Name of the function to call */
57+
name: string;
58+
/** Arguments for the function call as JSON string */
59+
arguments: string;
60+
};
61+
}
62+
63+
/**
64+
* Represents a tool definition in OpenAI format
65+
*/
66+
export interface Tool {
67+
/** The type of tool */
68+
type: 'function';
69+
/** Function definition */
70+
function: {
71+
/** Name of the function */
72+
name: string;
73+
/** Description of what the function does */
74+
description?: string;
75+
/** JSON schema for the function parameters */
76+
parameters?: any;
77+
};
78+
}
79+
80+
/**
81+
* LLM inference request parameters
82+
*/
83+
export interface LLMInferenceParams {
84+
/** Array of messages in the conversation */
85+
messages: Message[];
86+
/** Available tools for the model to use */
87+
tools?: Tool[];
88+
/** How the model should use tools */
89+
tool_choice?: 'auto' | 'none' | 'required' | { type: 'function'; function: { name: string } };
90+
/** The LLM role to determine which model to use */
91+
llmrole: string;
92+
/** Maximum number of tokens to generate */
93+
max_tokens?: number;
94+
/** Temperature for response generation */
95+
temperature?: number;
96+
/** Whether to stream the response */
97+
stream?: boolean;
98+
}
3199

32100
/**
33101
* @class Codebolt

src/modules/llm.ts

Lines changed: 29 additions & 73 deletions
Original file line number | Diff line number | Diff line change
@@ -1,75 +1,9 @@
11
import cbws from '../core/websocket';
2-
import {LLMResponse } from '@codebolt/types';
2+
import { LLMResponse } from '@codebolt/types';
3+
import type { Message, ToolCall, Tool, LLMInferenceParams } from '../index';
34

4-
/**
5-
* Represents a message in the conversation with roles and content.
6-
*/
7-
export interface Message {
8-
/** The role of the message sender: user, assistant, tool, or system */
9-
role: 'user' | 'assistant' | 'tool' | 'system';
10-
/** The content of the message, can be an array of content blocks or a string */
11-
content: any[] | string;
12-
/** Optional ID for tool calls */
13-
tool_call_id?: string;
14-
/** Optional tool calls for assistant messages */
15-
tool_calls?: ToolCall[];
16-
/** Additional properties that might be present */
17-
[key: string]: any;
18-
}
19-
20-
/**
21-
* Represents a tool call in OpenAI format
22-
*/
23-
export interface ToolCall {
24-
/** Unique identifier for this tool call */
25-
id: string;
26-
/** The type of tool call */
27-
type: 'function';
28-
/** Function call details */
29-
function: {
30-
/** Name of the function to call */
31-
name: string;
32-
/** Arguments for the function call as JSON string */
33-
arguments: string;
34-
};
35-
}
36-
37-
/**
38-
* Represents a tool definition in OpenAI format
39-
*/
40-
export interface Tool {
41-
/** The type of tool */
42-
type: 'function';
43-
/** Function definition */
44-
function: {
45-
/** Name of the function */
46-
name: string;
47-
/** Description of what the function does */
48-
description?: string;
49-
/** JSON schema for the function parameters */
50-
parameters?: any;
51-
};
52-
}
53-
54-
/**
55-
* LLM inference request parameters
56-
*/
57-
export interface LLMInferenceParams {
58-
/** Array of messages in the conversation */
59-
messages: Message[];
60-
/** Available tools for the model to use */
61-
tools?: Tool[];
62-
/** How the model should use tools */
63-
tool_choice?: 'auto' | 'none' | 'required' | { type: 'function'; function: { name: string } };
64-
/** The LLM role to determine which model to use */
65-
llmrole: string;
66-
/** Maximum number of tokens to generate */
67-
max_tokens?: number;
68-
/** Temperature for response generation */
69-
temperature?: number;
70-
/** Whether to stream the response */
71-
stream?: boolean;
72-
}
5+
// Re-export types for backward compatibility
6+
export type { Message, ToolCall, Tool, LLMInferenceParams };
737

748
/**
759
* A module for interacting with language learning models (LLMs) via WebSocket.
@@ -81,10 +15,32 @@ const cbllm = {
8115
* for the role is not found, it falls back to the default model for the current agent,
8216
* and ultimately to the default application-wide LLM if necessary.
8317
*
84-
* @param {LLMInferenceParams} message - The inference parameters including messages, tools, and options.
85-
* @returns {Promise<LLMResponse>} A promise that resolves with the LLM's response.
18+
* @param message - The inference parameters including:
19+
* - messages: Array of conversation messages
20+
* - tools: Available tools for the model to use
21+
* - tool_choice: How the model should use tools
22+
* - llmrole: The LLM role to determine which model to use
23+
* - max_tokens: Maximum number of tokens to generate
24+
* - temperature: Temperature for response generation
25+
* - stream: Whether to stream the response
26+
* @returns A promise that resolves with the LLM's response
8627
*/
87-
inference: async (message: LLMInferenceParams): Promise<LLMResponse> => {
28+
inference: async (message: {
29+
/** Array of messages in the conversation */
30+
messages: Message[];
31+
/** Available tools for the model to use */
32+
tools?: Tool[];
33+
/** How the model should use tools */
34+
tool_choice?: 'auto' | 'none' | 'required' | { type: 'function'; function: { name: string } };
35+
/** The LLM role to determine which model to use */
36+
llmrole: string;
37+
/** Maximum number of tokens to generate */
38+
max_tokens?: number;
39+
/** Temperature for response generation */
40+
temperature?: number;
41+
/** Whether to stream the response */
42+
stream?: boolean;
43+
}): Promise<LLMResponse> => {
8844
return cbws.messageManager.sendAndWaitForResponse(
8945
{
9046
"type": "inference",

tsconfig.json

Lines changed: 7 additions & 3 deletions
Original file line number | Diff line number | Diff line change
@@ -26,10 +26,13 @@
2626

2727
/* Modules */
2828
"module": "commonjs", /* Specify what module code is generated. */
29-
// "rootDir": "./", /* Specify the root folder within your source files. */
29+
"rootDir": "./src", /* Specify the root folder within your source files. */
3030
// "moduleResolution": "node10", /* Specify how TypeScript looks up a file from a given module specifier. */
31-
// "baseUrl": "./", /* Specify the base directory to resolve non-relative module names. */
32-
// "paths": {}, /* Specify a set of entries that re-map imports to additional lookup locations. */
31+
"baseUrl": "./", /* Specify the base directory to resolve non-relative module names. */
32+
"paths": {
33+
"@codebolt/codeboltjs": ["./src/index"],
34+
"@codebolt/codeboltjs/*": ["./src/*"]
35+
}, /* Specify a set of entries that re-map imports to additional lookup locations. */
3336
// "rootDirs": [], /* Allow multiple folders to be treated as one when resolving modules. */
3437
// "typeRoots": [], /* Specify multiple folders that act like './node_modules/@types'. */
3538
"types": ["node"], /* Specify type package names to be included without being referenced in a source file. */
@@ -56,6 +59,7 @@
5659
// "inlineSourceMap": true, /* Include sourcemap files inside the emitted JavaScript. */
5760
// "outFile": "./", /* Specify a file that bundles all outputs into one JavaScript file. If 'declaration' is true, also designates a file that bundles all .d.ts output. */
5861
"outDir": "dist", /* Specify an output folder for all emitted files. */
62+
"declarationDir": "dist", /* Specify the output directory for generated declaration files. */
5963
// "removeComments": true, /* Disable emitting comments. */
6064
// "noEmit": true, /* Disable emitting files from a compilation. */
6165
// "importHelpers": true, /* Allow importing helper functions from tslib once per project, instead of including them per-file. */

0 commit comments

Comments
 (0)