diff --git a/.vscode/settings.json b/.vscode/settings.json index 9fd2d42308..b55adad397 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -1,7 +1,7 @@ { "[typescript]": { "editor.formatOnSave": false, - "editor.defaultFormatter": "dbaeumer.vscode-eslint" + "editor.defaultFormatter": "vscode.typescript-language-features" }, "[javascript]": { "editor.formatOnSave": false, diff --git a/content/ai/aiChatDialogue/index-en-US.md b/content/ai/aiChatDialogue/index-en-US.md index 55792d1e8e..5ab4ffa385 100644 --- a/content/ai/aiChatDialogue/index-en-US.md +++ b/content/ai/aiChatDialogue/index-en-US.md @@ -1,6 +1,6 @@ --- localeCode: en-US -order: 101 +order: 102 category: Plus title: AIChatDialogue icon: doc-aiDialogue diff --git a/content/ai/aiChatDialogue/index.md b/content/ai/aiChatDialogue/index.md index 89ae9dd233..ddc95332e7 100644 --- a/content/ai/aiChatDialogue/index.md +++ b/content/ai/aiChatDialogue/index.md @@ -1,6 +1,6 @@ --- localeCode: zh-CN -order: 101 +order: 102 category: Ai title: AIChatDialogue AI对话 icon: doc-aiDialogue diff --git a/content/ai/aiChatInput/index-en-US.md b/content/ai/aiChatInput/index-en-US.md index 5cebfb2bb1..8c184283c6 100644 --- a/content/ai/aiChatInput/index-en-US.md +++ b/content/ai/aiChatInput/index-en-US.md @@ -1,6 +1,6 @@ --- localeCode: en-US -order: 100 +order: 101 category: Ai title: AIChatInput icon: doc-aiInput diff --git a/content/ai/aiChatInput/index.md b/content/ai/aiChatInput/index.md index 3f37948698..611ea8789e 100644 --- a/content/ai/aiChatInput/index.md +++ b/content/ai/aiChatInput/index.md @@ -1,6 +1,6 @@ --- localeCode: zh-CN -order: 100 +order: 101 category: Ai title: AIChatInput 聊天输入框 icon: doc-aiInput diff --git a/content/ai/clientAI/index-en-US.md b/content/ai/clientAI/index-en-US.md new file mode 100644 index 0000000000..e300e4d9dd --- /dev/null +++ b/content/ai/clientAI/index-en-US.md @@ -0,0 +1,1039 @@ +--- +localeCode: en-US +order: 100 +category: Ai +title: Client Side AI +icon: doc-clientAI 
+width: 60% +brief: Running AI in browser +showNew: true +--- + +## Introduction + +`ClientAI` is a client-side AI chat component implemented based on [MLC Engine](https://github.com/mlc-ai/mlc-llm), supporting running AI models directly in the browser without a backend server, suitable for quickly integrating LLM into websites. + +### Typical Use Cases + +The `ClientAI` component is suitable for the following typical use cases: + +- **Web-based Knowledge Q&A**: Build localized knowledge Q&A systems based on website content, allowing users to get answers directly within the page without redirecting or calling external services +- **Intelligent Customer Service**: Intelligent customer service systems integrated with Tool Calling capabilities, capable of calling business APIs to query orders, accounts, and other information for more accurate customer service +- **Search Query Rewriting**: Perform semantic understanding and rewriting optimization of user-entered search keywords to improve the accuracy and relevance of search results +- **Multi-text Submission Validation**: Perform consistency validation, format checking, and quality assessment on multiple text segments submitted by users to ensure content complies with business standards +- **User Input Pre-screening**: Real-time compliance detection and sensitive information identification of user input content, providing risk warnings and content filtering before submission +- **Complex Address Parsing**: Automatically segment complex mailing addresses entered by users, extracting structured information such as province, city, district, street, and house number to improve form filling efficiency +- **Offline AI Applications**: AI application scenarios that require completely offline operation, providing intelligent interaction capabilities without relying on network connections +- **Privacy-sensitive Scenarios**: Application scenarios with extremely high data privacy requirements, where all data processing is 
completed locally and data is not uploaded to servers + +### Core Features + +- **Fully Local Execution**: Models run in the browser, data is not uploaded to servers, protecting user privacy +- **Multiple Model Support**: Supports Qwen, Hermes, and other model series +- **Qwen Model Enhancements**: Since the underlying inference engine does not support CoT and Tool Calling for Qwen models, we implemented CoT and Tool Calling support for Qwen series models under MLC Engine +- **Worker Support**: Supports running in Web Workers to avoid blocking the main thread +- **Singleton Pattern**: Multiple component instances share the same model, avoiding duplicate downloads + +### Qwen Model Implementation + +`ClientAI` provides special optimizations and extensions for Qwen series models: + +1. **CoT**: + - By analyzing Qwen's training process and tokenizer config, we implemented the ability to freely toggle CoT in Qwen's non-instruct models + - Qwen3 models support `<think>` tags to display thinking processes + - The component automatically parses and renders thinking content + - You can disable thinking by adding `/no_think` tag to get direct answers + +2. **Tool Calling (Function Calling)**: + - The underlying MLC Engine's native Function Calling only supports Hermes series models + - By analyzing Qwen's training process and tokenizer config, we extended Tool Calling support for Qwen series models under MLC Engine + +## Demos + +### How to Import + +```jsx import +import { ClientAI } from '@douyinfe/semi-ui'; +``` + +### Basic Usage + +The `ClientAI` component provides pre-configured engine configurations. 
You need to choose the appropriate configuration based on your website's target users: + +- **If your website targets international users**, use `ClientAI.Qwen3_1_7B_EngineConfig` (using Hugging Face + GitHub) +- **If your website targets users in China**, use `ClientAI.Qwen3_1_7B_EngineConfigCN` (using ModelScope + jsDelivr CDN) + +`modelId` can be obtained from the engine configuration, e.g.: `ClientAI.Qwen3_1_7B_EngineConfig.appConfig.model_list[0].model_id`. + +The following example is for international users: + +```jsx live=true dir="column" noInline=true +import React from 'react'; +import { ClientAI } from '@douyinfe/semi-ui'; + +function Basic() { + // Get modelId from engine configuration + const engineConfig = ClientAI.Qwen3_1_7B_EngineConfig; + const modelId = engineConfig.appConfig.model_list[0].model_id; + + return ( + <ClientAI engineConfig={engineConfig} modelId={modelId} /> + ); +} + +render(<Basic />); +``` + +### Self-Hosting Model Files + +The `ClientAI` component automatically downloads model files from external data sources on first run: +- **International users**: Downloads model weights and WASM runtime files from Hugging Face and GitHub Raw +- **China users**: Downloads model weights and WASM runtime files from ModelScope and jsDelivr CDN + +Downloaded model files are cached in the browser's IndexedDB, so subsequent uses don't require re-downloading. + +If you don't want to download dependencies from external data sources, you can choose to download model files to your own CDN or cloud storage, then configure custom URLs pointing to your own addresses. This avoids relying on third-party service availability and provides better download speeds and stability. 
+ +**Step 1: Download Model Files** + +You need to download the following files: +- **Model weights**: Download the complete model repository from [Hugging Face](https://huggingface.co/mlc-ai/Qwen3-1.7B-q4f32_1-MLC) or [ModelScope](https://modelscope.cn/models/mlc-ai/Qwen3-1.7B-q4f32_1-MLC) +- **WASM file**: Download the WebGPU runtime from [GitHub](https://raw.githubusercontent.com/mlc-ai/binary-mlc-llm-libs/main/web-llm-models/v0_2_80/Qwen3-1.7B-q4f32_1-ctx4k_cs1k-webgpu.wasm) + +**Step 2: Deploy to Your CDN/Cloud Storage** + +Deploy the downloaded files to your cloud storage service and get the access URLs. + +**Step 3: Configure Custom URLs** + +```jsx +import React from 'react'; +import { ClientAI } from '@douyinfe/semi-ui'; + +function CustomModel() { + // Self-hosting configuration example + const customEngineConfig = { + appConfig: { + useIndexedDBCache: true, + model_list: [ + { + // Replace with your own model weights URL + model: 'https://your-cdn.com/models/Qwen3-1.7B-q4f32_1-MLC', + model_id: 'Qwen3-1.7B-q4f32_1-MLC', + // Replace with your own WASM file URL + model_lib: 'https://your-cdn.com/wasm/Qwen3-1.7B-q4f32_1-ctx4k_cs1k-webgpu.wasm', + vram_required_MB: 2635.44, + low_resource_required: true, + // Optional: Override model default configuration + overrides: { + // Context window size, Qwen3-1.7B supports up to 40960 tokens + context_window_size: 40960, + }, + }, + ], + }, + initProgressCallback: (progress) => { + console.log('Model loading progress:', progress); + }, + }; + + // Get modelId from configuration + const modelId = customEngineConfig.appConfig.model_list[0].model_id; + + return ( + + ); +} +``` + +**Configuration Reference**: + +| Property | Description | Default | +|----------|-------------|---------| +| `model` | URL of the model weights file | - | +| `model_id` | Unique identifier for the model | - | +| `model_lib` | URL of the WebGPU WASM runtime file | - | +| `low_resource_required` | Whether to use low resource mode | `false` | 
+| `overrides.context_window_size` | Context window size (tokens), Qwen3-1.7B supports up to 40960 | Model default | + +### Error Handling + +You can handle errors through the `onError` callback: + +```jsx live=true dir="column" noInline=true +import React, { useState } from 'react'; +import { ClientAI, Toast } from '@douyinfe/semi-ui'; + +function WithErrorHandler() { + const engineConfig = ClientAI.Qwen3_1_7B_EngineConfig; + const modelId = engineConfig.appConfig.model_list[0].model_id; + + const handleError = (error) => { + Toast.error(`Error: ${error.message}`); + console.error('ClientAI error:', error); + }; + + return ( + + ); +} + +render(); +``` + +### Tool Calling (Function Calling) + +`ClientAI` supports Tool Calling functionality, allowing AI to call user-defined tool functions. + +#### Technical Background + +The underlying MLC Engine's native Function Calling feature **only supports Hermes series models**, and does not support Qwen and other models. + +By analyzing Qwen's training process and tokenizer config, we extended **Tool Calling support for Qwen series models** under MLC Engine. This enables lightweight Qwen models (such as 1.7B) to achieve tool calling capabilities in the browser. + +**Usage Tips**: +- Try asking AI "What's the weather like in Beijing today?" +- Try asking AI "Calculate 123 * 456 for me" +- Try asking AI "What time is it now?" 
+ +```jsx live=true dir="column" noInline=true +import React from 'react'; +import { ClientAI, Toast } from '@douyinfe/semi-ui'; + +function ToolCallingDemo() { + const engineConfig = ClientAI.Qwen3_1_7B_EngineConfig; + const modelId = engineConfig.appConfig.model_list[0].model_id; + + // Define available tools + const tools = [ + { + type: 'function', + function: { + name: 'get_weather', + description: 'Get current weather information for a specified city', + parameters: { + type: 'object', + properties: { + city: { + type: 'string', + description: 'City name, e.g.: Beijing, Shanghai, Shenzhen' + } + }, + required: ['city'] + } + } + }, + { + type: 'function', + function: { + name: 'calculate', + description: 'Perform mathematical calculations', + parameters: { + type: 'object', + properties: { + expression: { + type: 'string', + description: 'Mathematical expression, e.g.: 2+2, 10*5, 100/4' + } + }, + required: ['expression'] + } + } + }, + { + type: 'function', + function: { + name: 'get_current_time', + description: 'Get current time', + parameters: { + type: 'object', + properties: {}, + required: [] + } + } + } + ]; + + // Handle Tool Call + // Component will automatically call this function, wait for results, and automatically send results to continue conversation + const handleToolCall = async (toolCalls, rawOutput) => { + console.log('Received tool calls:', toolCalls); + + // Mock weather data + const weatherData = { + 'Beijing': { temp: '5°C', weather: 'Sunny', humidity: '30%' }, + 'Shanghai': { temp: '12°C', weather: 'Cloudy', humidity: '65%' }, + 'Shenzhen': { temp: '22°C', weather: 'Sunny', humidity: '70%' }, + 'Guangzhou': { temp: '20°C', weather: 'Overcast', humidity: '75%' }, + }; + + // Execute all tool calls + return toolCalls.map((toolCall) => { + const { call_id, name, arguments: argsStr } = toolCall; + let result = ''; + + try { + const args = JSON.parse(argsStr || '{}'); + + if (name === 'get_weather') { + const city = args.city; + const data 
= weatherData[city]; + if (data) { + result = JSON.stringify({ + city, + temperature: data.temp, + weather: data.weather, + humidity: data.humidity + }); + } else { + result = JSON.stringify({ error: 'Weather data not found for city ' + city }); + } + } else if (name === 'calculate') { + const expression = args.expression.replace(/[^0-9+\-*/().]/g, ''); + const calcResult = Function('"use strict"; return (' + expression + ')')(); + result = JSON.stringify({ expression: args.expression, result: calcResult }); + } else if (name === 'get_current_time') { + const now = new Date(); + result = JSON.stringify({ + time: now.toLocaleTimeString('en-US'), + date: now.toLocaleDateString('en-US'), + timestamp: now.getTime() + }); + } else { + result = JSON.stringify({ error: 'Unknown tool: ' + name }); + } + + Toast.success('Tool ' + name + ' executed successfully'); + return { call_id, name, arguments: argsStr, result, status: 'success' }; + } catch (e) { + Toast.error('Tool ' + name + ' execution failed'); + return { call_id, name, arguments: argsStr, result: JSON.stringify({ error: e.message }), status: 'error' }; + } + }); + }; + + return ( + Toast.error(error.message)} + /> + ); +} + +render(); +``` + +**Notes**: +1. Tool Calling currently only supports Qwen series models +2. Using `handleToolCall` prop, the component will automatically handle tool calls and result sending, no need to manually call `sendToolResults` +3. `handleToolCall` returns `Promise` or `ToolCallResult[]`, and the component will automatically send results to continue conversation +4. Tool definitions follow OpenAI Function Calling format specifications + +### Deep Think CoT + +`ClientAI` supports Deep Think CoT toggle. When enabled, AI will perform deeper reasoning (showing thinking process). When disabled, it will add `/no_think` tag to let the model skip thinking and answer directly. 
+ +```jsx live=true dir="column" noInline=true +import React from 'react'; +import { ClientAI } from '@douyinfe/semi-ui'; + +function DeepThinkDemo() { + const engineConfig = ClientAI.Qwen3_1_7B_EngineConfig; + const modelId = engineConfig.appConfig.model_list[0].model_id; + + return ( + <ClientAI engineConfig={engineConfig} modelId={modelId} showDeepThinkButton={true} /> + ); +} + +render(<DeepThinkDemo />); +``` + +**Usage Instructions**: +- When `showDeepThinkButton` is set to `true`, a "Deep Think CoT" button will appear in the bottom left corner of the input box +- Click the button to toggle Deep Think CoT on/off +- When enabled: AI will output thinking process wrapped in `<think>...</think>` tags +- When disabled: `/no_think` tag will be automatically added to the end of the message, allowing Qwen3 model to skip thinking + +### Custom Render + +If you need to completely customize the UI (using your own message list and input box styles), you can use the `render` prop. After passing the `render` function, the component will no longer render the default `AIChatDialogue` and `AIChatInput`, but will call your render function instead. + +You can also choose not to render any UI by returning `null` from the `render` prop, and then call AI capabilities through the `sendMessage` method. This approach is suitable for scenarios such as search query rewriting and text pre-screening where users are unaware of AI and do not require interaction. + +```jsx live=true dir="column" noInline=true +import React, { useState, useRef } from 'react'; +import { ClientAI, Button, Input, Spin } from '@douyinfe/semi-ui'; + +// Custom render content component +function CustomContent(props) { + const { + loading, + error, + messages, + isGenerating, + enableDeepThink, + sendMessage, + stopGenerate, + clearMessages, + setEnableDeepThink + } = props; + + const [inputValue, setInputValue] = useState(''); + + if (loading) { + return ( + 
+ +

Loading model...

+
+ ); + } + + if (error) { + return
Error: {error}
; + } + + // Helper function to render message content + const renderMessageContent = (content) => { + if (typeof content === 'string') { + return content; + } + if (Array.isArray(content)) { + return content.map((item, i) => { + // Handle nested structure returned by chatInputToMessage (user messages) + if (item.type === 'message' && Array.isArray(item.content)) { + return item.content.map((subItem, j) => ( + {subItem.text || ''} + )); + } + // Handle Deep Think CoT content (reasoning type) + if (item.type === 'reasoning' && Array.isArray(item.summary)) { + const thinkText = item.summary.map(s => s.text).join(''); + return ( +
+ 💭 {thinkText} + {item.status === 'in_progress' && ...} +
+ ); + } + // Handle regular text + return {item.text || ''}; + }); + } + return JSON.stringify(content); + }; + + const handleSend = () => { + if (inputValue.trim()) { + sendMessage(inputValue); + setInputValue(''); + } + }; + + return ( +
+ {/* Custom message list */} +
+ {messages.length === 0 ? ( +
+ No messages yet, start a conversation! +
+ ) : ( + messages.map((msg) => ( +
+
+ {msg.role === 'user' ? 'User' : 'AI'} +
+
+ {renderMessageContent(msg.content)} + {msg.status === 'in_progress' && |} +
+
+ )) + )} +
+ + {/* Custom input area */} +
+ + {isGenerating ? ( + + ) : ( + + )} + + +
+
+ ); +} + +function CustomRenderDemo() { + const engineConfig = ClientAI.Qwen3_1_7B_EngineConfig; + const modelId = engineConfig.appConfig.model_list[0].model_id; + + return ( + } + /> + ); +} + +render(); +``` + +### Using Qwen3-4B Model + +`ClientAI` also provides Qwen3-4B model configuration, which has stronger capabilities than the 1.7B model. Suitable for scenarios requiring higher model capabilities, especially scenarios that require more world knowledge. + +```jsx +import React from 'react'; +import { ClientAI } from '@douyinfe/semi-ui'; + +function Qwen4B() { + // Using Qwen3-4B model configuration (international users) + const engineConfig = ClientAI.Qwen3_4B_EngineConfig; + const modelId = engineConfig.appConfig.model_list[0].model_id; + + return ( + + ); +} +``` + +**Model Selection Recommendations**: +- **Qwen3-1.7B**: Lightweight model, suitable for most scenarios +- **Qwen3-4B**: More capable model, suitable for scenarios requiring higher model capabilities, especially scenarios that require more world knowledge + +**ClientAIRenderProps Parameter Description**: + +| Property | Description | Type | +|----------|-------------|------| +| loading | Whether the model is loading | `boolean` | +| error | Error message | `string \| null` | +| messages | Message list | `Message[]` | +| isGenerating | Whether a reply is being generated | `boolean` | +| enableDeepThink | Whether Deep Think CoT is enabled | `boolean` | +| sendMessage | Send message | `(text: string) => Promise` | +| stopGenerate | Stop generation | `() => void` | +| clearMessages | Clear messages | `() => void` | +| setEnableDeepThink | Set Deep Think CoT toggle | `(enable: boolean) => void` | +| sendToolResults | Send Tool execution results | `(results: ToolCallResult[]) => Promise` | + +### Using Worker + +`ClientAI` runs models in the main thread by default. If you don't provide `worker.url`, the model will run in the main thread. 
Running models in the main thread may block the UI, so it's recommended to switch to Worker mode for better performance. + +To switch to Worker mode to avoid blocking the main thread, you need to complete the following two steps: + +**Step 1: Create Worker File** + +Create a Worker file (e.g., `worker.ts` or `worker.js`): + +```typescript +// worker.ts +import { WebWorkerMLCEngineHandler } from '@mlc-ai/web-llm'; + +const handler = new WebWorkerMLCEngineHandler(); + +// Handle messages from the main thread +self.onmessage = (msg: MessageEvent) => { + handler.onmessage(msg); +}; +``` + +**Worker File Notes**: +- `WebWorkerMLCEngineHandler` handles communication with the main thread and processes incoming requests +- The worker thread receives messages and processes actual computation using a hidden engine, then returns results back to the main thread via messages +- Worker files need to use ES Module format (`type: 'module'`) +- MLC Engine dependencies are already included in the component library, no additional installation required + +**Step 2: Configure Worker URL in Component** + +Pass the Worker file URL via the `worker` prop in the component: + +```jsx +import React from 'react'; +import { ClientAI } from '@douyinfe/semi-ui'; + +function WithWorker() { + const engineConfig = ClientAI.Qwen3_1_7B_EngineConfig; + const modelId = engineConfig.appConfig.model_list[0].model_id; + + return ( + + ); +} +``` + +**Configuration Notes**: +- `worker.url`: **Required**, the URL of the Worker file. You must provide this parameter to switch to Worker mode +- `worker.enabled`: Whether to enable Worker, defaults to `true`. Set to `false` to run in the main thread +- **Important**: Worker mode is only used when both `worker.url` is provided and `worker.enabled !== false`. 
Otherwise, it runs in the main thread by default + +**Using Build Tools**: + +**Vite**: +```typescript +worker: { + url: new URL('./worker.ts', import.meta.url).href, + enabled: true, +} +``` + +**Webpack**: +```typescript +// Need to install worker-loader or use Webpack 5's Worker support +import Worker from './worker.ts?worker'; + +worker: { + url: Worker, + enabled: true, +} +``` + +**Notes**: +- Worker files need to import `WebWorkerMLCEngineHandler` from `@mlc-ai/web-llm` +- Ensure build tools are properly configured for Worker support +- Worker files must use ES Module format (`type: 'module'`) + +### Modify User Input + +The `onUserMessage` callback can modify user input before sending, and the modified content will be used for both display and sending to AI: + +```jsx live=true dir="column" noInline=true +import React from 'react'; +import { ClientAI } from '@douyinfe/semi-ui'; + +function OnUserMessageExample() { + const engineConfig = ClientAI.Qwen3_1_7B_EngineConfig; + const modelId = engineConfig.appConfig.model_list[0].model_id; + + return ( + { + // Automatically add prefix + return `Please answer briefly: ${userContent}`; + }} + /> + ); +} + +render(); +``` + +### Intercept AI Call + +The `beforeAIInput` callback can return a custom reply before AI call. If a non-empty string is returned, it will skip the AI call and use the returned reply directly: + +```jsx live=true dir="column" noInline=true +import React from 'react'; +import { ClientAI } from '@douyinfe/semi-ui'; + +function BeforeAIInputExample() { + const engineConfig = ClientAI.Qwen3_1_7B_EngineConfig; + const modelId = engineConfig.appConfig.model_list[0].model_id; + + return ( + { + const lastMessage = messages[messages.length - 1]; + // If user asks "hello", return fixed reply + if (lastMessage && lastMessage.content && lastMessage.content.includes('hello')) { + return 'Hello! 
I am an AI assistant, glad to serve you.'; + } + // Return empty string to call AI normally + return ''; + }} + /> + ); +} + +render(); +``` + +### Modify AI Reply + +The `afterAIInput` callback can modify AI reply content after receiving it: + +```jsx live=true dir="column" noInline=true +import React from 'react'; +import { ClientAI } from '@douyinfe/semi-ui'; + +function AfterAIInputExample() { + const engineConfig = ClientAI.Qwen3_1_7B_EngineConfig; + const modelId = engineConfig.appConfig.model_list[0].model_id; + + return ( + { + // Add prefix to AI reply + return `[AI Reply] ${aiContent}`; + }} + /> + ); +} + +render(); +``` + +### Control Streaming Display + +The `stream` parameter controls whether to stream AI reply display. When set to `false`, it will wait for the stream to complete before displaying once: + +```jsx live=true dir="column" noInline=true +import React from 'react'; +import { ClientAI } from '@douyinfe/semi-ui'; + +function StreamExample() { + const engineConfig = ClientAI.Qwen3_1_7B_EngineConfig; + const modelId = engineConfig.appConfig.model_list[0].model_id; + + return ( + + ); +} + +render(); +``` + +### Set Default Conversation Messages + +`defaultMessages` is used to set initial conversation history, and these messages will be displayed when the component loads: + +```jsx live=true dir="column" noInline=true +import React from 'react'; +import { ClientAI } from '@douyinfe/semi-ui'; + +function DefaultMessagesExample() { + const engineConfig = ClientAI.Qwen3_1_7B_EngineConfig; + const modelId = engineConfig.appConfig.model_list[0].model_id; + + const defaultMessages = [ + { + id: 'msg-1', + role: 'user', + content: 'Hello, please introduce yourself', + createdAt: Date.now() - 60000, + status: 'completed', + }, + { + id: 'msg-2', + role: 'assistant', + content: 'Hello! I am an AI assistant, glad to serve you. 
I can help you answer questions, provide information and suggestions.', + createdAt: Date.now() - 30000, + status: 'completed', + }, + ]; + + return ( + + ); +} + +render(); +``` + +## API Reference + +### ClientAI + +| Property | Description | Type | Default | +|----------|-------------|------|---------| +| afterAIInput | Callback after AI reply, can modify AI reply content | `(aiContent: string, messages: Message[]) => string \| Promise` | - | +| beforeAIInput | Callback before AI reply, can intercept AI call and return custom reply. Returns non-empty string will be used as AI reply, returns empty string will call AI normally | `(messages: Message[]) => string \| Promise` | - | +| chatOpts | Chat options, can configure tools, etc. Override model default configuration | `ChatOptions \| ChatOptions[]` | - | +| className | Custom class name | `string` | - | +| defaultEnableDeepThink | Default state of Deep Think CoT | `boolean` | `true` | +| defaultMessages | Default conversation messages, used to set initial conversation history | `Message[]` | - | +| dialogueProps | Pass-through props for AIChatDialogue | `Partial` | - | +| engineConfig | Engine configuration, **required**. Use `ClientAI.Qwen3_1_7B_EngineConfig` / `ClientAI.Qwen3_1_7B_EngineConfigCN` (1.7B model) or `ClientAI.Qwen3_4B_EngineConfig` / `ClientAI.Qwen3_4B_EngineConfigCN` (4B model). You can also choose to self-host model files | `MLCEngineConfig` | - | +| inputProps | Pass-through props for AIChatInput | `Partial` | - | +| modelId | Model ID, **required**. 
Can be obtained from engine configuration, e.g.: `engineConfig.appConfig.model_list[0].model_id` | `string \| string[]` | - | +| handleToolCall | Tool call handler function, component will automatically call this function, wait for results, and automatically send results to continue conversation | `(toolCalls: ToolCall[], rawOutput: string) => Promise \| ToolCallResult[]` | - | +| onError | Error callback | `(error: Error) => void` | - | +| onToolCall | Tool call callback, triggered when AI output contains tool_call (for notification only, requires manual call to sendToolResults) | `(toolCalls: ToolCall[], rawOutput: string) => void` | - | +| onUserMessage | Callback before user message is sent, can modify user input content. The returned string will be used for both display and sending to AI | `(userContent: string, messages: Message[]) => string` | - | +| render | Custom render function. After passing this function, UI rendering will be completely controlled by the user | `(props: ClientAIRenderProps) => ReactNode` | - | +| roleConfig | Role configuration for user, assistant, system, etc. (name, avatar, etc.) | `RoleConfig` | `{ user: { name: 'User' }, assistant: { name: 'AI Assistant' }, system: { name: 'System' } }` (default does not include avatar) | +| showDeepThinkButton | Whether to show Deep Think CoT button | `boolean` | `false` | +| stream | Control whether to stream AI reply display. When `false`, wait for stream to complete before displaying once | `boolean` | `true` | +| style | Custom style | `React.CSSProperties` | - | +| systemPrompt | System prompt | `string` | Dynamically set based on browser language: `'You are a helpful AI assistant. 
Reply to users in English.'` for English, `'你是一个有用的 AI 助手。使用中文回复用户。'` for Chinese | +| worker | Worker configuration | `{ url?: string; enabled?: boolean }` | `{ enabled: true }` (runs in main thread by default, requires `url` to use Worker) | + +### Methods + +The following methods can be called via `ref`: + +| Method | Description | Parameters | +|--------|-------------|------------| +| sendToolResults | Send Tool execution results to let AI continue the conversation (usually not needed to call manually, recommend using `handleToolCall` prop) | `(toolResults: ToolCallResult[]) => Promise` | + +### ToolCall Type + +```typescript +interface ToolCall { + type: 'function_call'; + call_id: string; + name: string; + arguments: string; + status: string; +} +``` + +### ToolCallResult Type + +```typescript +interface ToolCallResult { + call_id: string; + name: string; + arguments: string; + result: string; + status: 'success' | 'error'; +} +``` + +### Static Properties + +The `ClientAI` component provides the following static properties for engine configuration: + +**Qwen3-1.7B Model** (lightweight): +- `ClientAI.Qwen3_1_7B_EngineConfig` - International users engine configuration (using Hugging Face + GitHub Raw) +- `ClientAI.Qwen3_1_7B_EngineConfigCN` - China users engine configuration (using ModelScope + jsDelivr CDN) + +**Qwen3-4B Model** (more capable): +- `ClientAI.Qwen3_4B_EngineConfig` - International users engine configuration (using Hugging Face + GitHub Raw) +- `ClientAI.Qwen3_4B_EngineConfigCN` - China users engine configuration (using ModelScope + jsDelivr CDN) + +> 💡 **Tip**: You can choose to download model files to your own CDN or cloud storage, then configure custom URLs. See [Self-Hosting Model Files](#self-hosting-model-files) section. + +### Type Reference + +Types used by the `ClientAI` component (such as `MLCEngineConfig`, `ChatOptions`, `AppConfig`, etc.) are all re-exported from `@douyinfe/semi-foundation/clientAI/interface`. 
These types are consistent with MLC Engine SDK. You can refer to the [MLC Engine documentation](https://github.com/mlc-ai/mlc-llm) for detailed descriptions of these types. + +You can also import these types from `@douyinfe/semi-foundation/clientAI/interface`: + +```typescript +import type { + MLCEngineConfig, + ChatOptions, + AppConfig, + WebWorkerMLCEngine, + // ... other types +} from '@douyinfe/semi-foundation/clientAI/interface'; +``` + +## Notes + +1. **Worker File Implementation**: When using Worker mode, you need to create a Worker file. The Worker file needs to import `WebWorkerMLCEngineHandler` and handle messages. Refer to the [Using Worker](#using-worker) section above for detailed implementation. + +2. **Model Loading Time**: The first model load may take a long time. It is recommended to show a loading state to users. The component automatically displays a loading progress bar. + +3. **Browser Compatibility**: MLC Engine requires browser support for WebGPU. Please ensure that the target browser supports this feature. + +4. **Memory Usage**: Running AI models consumes a lot of memory. It is recommended to use on devices with sufficient memory. + +5. **Model File Size**: Model files may be large, and the first download takes time. It is recommended to use IndexedDB caching. + +6. **Type Imports**: All MLC Engine-related types can be imported from `@douyinfe/semi-foundation/clientAI/interface`. These types are consistent with the MLC Engine SDK, making it convenient to refer to the documentation. + +7. **Tool Calling Support**: The underlying MLC Engine natively only supports Function Calling for Hermes series models. We extended support for Qwen series models by analyzing Qwen's training process and tokenizer config. + +8. **Models Supporting Tool Calling**: + - ✅ Qwen series models (Qwen3-0.6B, Qwen3-1.7B, Qwen3-4B, etc.) 
+ - ✅ Hermes series models (MLC native support) + diff --git a/content/ai/clientAI/index.md b/content/ai/clientAI/index.md new file mode 100644 index 0000000000..8ec85c5dd5 --- /dev/null +++ b/content/ai/clientAI/index.md @@ -0,0 +1,999 @@ +--- +localeCode: zh-CN +order: 100 +category: Ai +title: ClientAI 端侧AI +icon: doc-clientAI +width: 60% +brief: 在浏览器中直接运行 AI 大模型 +showNew: true +--- + +## 简介 + +`ClientAI` 是一个基于 [MLC Engine](https://github.com/mlc-ai/mlc-llm) 实现的客户端 AI 聊天组件,支持在浏览器中直接运行 AI 模型,无需后端服务器,适合快速为网站接入 LLM。 + +### 典型应用场景 + +`ClientAI` 组件适用于以下典型应用场景: + +- **网页知识问答**:基于网站内容构建本地化知识问答系统,用户可直接在页面内获取答案,无需跳转或调用外部服务 +- **智能客服助手**:集成 Tool Calling 能力的智能客服系统,可调用业务接口查询订单、账户等信息,提供更精准的客户服务 +- **搜索查询改写**:对用户输入的搜索关键词进行语义理解和改写优化,提升搜索结果的准确性和相关性 +- **多文本提交校验**:对用户提交的多段文本内容进行一致性校验、格式检查和质量评估,确保内容符合业务规范 +- **用户输入预审核**:实时对用户输入内容进行合规性检测和敏感信息识别,在提交前进行风险提示和内容过滤 +- **复杂地址智能解析**:对用户输入的复杂邮寄地址进行自动分割,提取省市区、街道、门牌号等结构化信息,提升表单填写效率 +- **离线 AI 应用**:需要完全离线运行的 AI 应用场景,不依赖网络连接即可提供智能交互能力 +- **隐私敏感场景**:对数据隐私要求极高的应用场景,所有数据处理在本地完成,数据不上传到服务器 + +### 核心特性 + +- **完全本地运行**:模型在浏览器中运行,数据不上传到服务器,保护用户隐私 +- **支持多种模型**:支持 Qwen、Hermes 等多种模型系列 +- **Qwen 模型增强**:针对底层推理引擎不支持深度思考和 Tool Calling 的情况,为 Qwen 系列模型在 MLC Engine 下支持了深度思考 CoT 和 Tool Calling 功能 +- **Worker 支持**:支持在 Web Worker 中运行,避免阻塞主线程 +- **单例模式**:多个组件实例共享同一个模型,避免重复下载 + +### Qwen 模型实现说明 + +`ClientAI` 对 Qwen 系列模型进行了特殊优化和扩展: + +1. **深度思考 CoT**: + - 通过分析 Qwen 的训练过程和 tokenizer config,我们在 Qwen 的非 instruct 模型下实现了自由开关深度思考 CoT 的能力 + - Qwen3 模型支持 `` 标签来显示思考过程 + - 组件会自动解析并渲染思考内容 + - 可以通过 `/no_think` 标签关闭思考过程,直接输出答案 + +2. 
**Tool Calling(函数调用)**: + - 底层 MLC Engine 原生的 Function Calling 仅支持 Hermes 系列模型 + - 通过分析 Qwen 的训练过程和 tokenizer config,在 MLC Engine 下为 Qwen 系列模型扩展了 Tool Calling 支持 + +## 代码演示 + +### 如何引入 + +```jsx import +import { ClientAI } from '@douyinfe/semi-ui'; +``` + +### 基本用法 + +`ClientAI` 组件提供了预配置的引擎配置,你需要根据你的网站用户所在地区选择合适的配置: + +- **如果你的网站面向中国大陆用户**,请使用 `ClientAI.Qwen3_1_7B_EngineConfigCN`(使用 ModelScope + jsDelivr CDN) +- **如果你的网站面向国际用户**,请使用 `ClientAI.Qwen3_1_7B_EngineConfig`(使用 Hugging Face + GitHub) + +`modelId` 可以从引擎配置中获取,如:`ClientAI.Qwen3_1_7B_EngineConfigCN.appConfig.model_list[0].model_id`。 + +以下示例适用于中国大陆用户: + +```jsx live=true dir="column" noInline=true +import React from 'react'; +import { ClientAI } from '@douyinfe/semi-ui'; + +function Basic() { + // 从引擎配置中获取 modelId + const engineConfig = ClientAI.Qwen3_1_7B_EngineConfigCN; + const modelId = engineConfig.appConfig.model_list[0].model_id; + + return ( + + ); +} + +render(); +``` + +### 自部署模型文件 + +`ClientAI` 组件在第一次运行时需要联网自动从外部数据源下载模型文件: +- **国际用户**:从 Hugging Face 和 GitHub Raw 下载模型权重和 WASM 运行时文件 +- **中国大陆用户**:从 ModelScope 和 jsDelivr CDN 下载模型权重和 WASM 运行时文件 + +下载的模型文件会缓存在浏览器的 IndexedDB 中,后续使用无需重复下载。 + +如果你不希望从外部数据源下载依赖,可以选择将模型文件下载到自己的 CDN 或 OSS 上,然后修改配置指向自己的地址。这样可以避免依赖第三方服务的可用性,并获得更好的下载速度和稳定性。 + +**步骤 1:下载模型文件** + +你需要下载以下两类文件: +- **模型权重文件**:从 [Hugging Face](https://huggingface.co/mlc-ai/Qwen3-1.7B-q4f32_1-MLC) 或 [ModelScope](https://modelscope.cn/models/mlc-ai/Qwen3-1.7B-q4f32_1-MLC) 下载完整的模型仓库 +- **WASM 文件**:从 [GitHub](https://raw.githubusercontent.com/mlc-ai/binary-mlc-llm-libs/main/web-llm-models/v0_2_80/Qwen3-1.7B-q4f32_1-ctx4k_cs1k-webgpu.wasm) 下载 WebGPU 运行时 + +**步骤 2:部署到你的 CDN/OSS** + +将下载的文件部署到你的云存储服务,获取访问 URL。 + +**步骤 3:配置自定义 URL** + +```jsx +import React from 'react'; +import { ClientAI } from '@douyinfe/semi-ui'; + +function CustomModel() { + // 自部署配置示例 + const customEngineConfig = { + appConfig: { + useIndexedDBCache: true, + model_list: [ + { + // 替换为你自己的模型权重 URL + model: 
'https://your-cdn.com/models/Qwen3-1.7B-q4f32_1-MLC', + model_id: 'Qwen3-1.7B-q4f32_1-MLC', + // 替换为你自己的 WASM 文件 URL + model_lib: 'https://your-cdn.com/wasm/Qwen3-1.7B-q4f32_1-ctx4k_cs1k-webgpu.wasm', + vram_required_MB: 2635.44, + low_resource_required: true, + // 可选:覆盖模型默认配置 + overrides: { + // 上下文窗口大小,Qwen3-1.7B 最大支持 40960 tokens + context_window_size: 40960, + }, + }, + ], + }, + initProgressCallback: (progress) => { + console.log('Model loading progress:', progress); + }, + }; + + // 从配置中获取 modelId + const modelId = customEngineConfig.appConfig.model_list[0].model_id; + + return ( + + ); +} +``` + +**配置说明**: + +| 配置项 | 说明 | 默认值 | +|--------|------|--------| +| `model` | 模型权重文件的 URL | - | +| `model_id` | 模型唯一标识符 | - | +| `model_lib` | WebGPU WASM 运行时文件的 URL | - | +| `low_resource_required` | 是否为低资源模式 | `false` | +| `overrides.context_window_size` | 上下文窗口大小(tokens),Qwen3-1.7B 最大支持 40960 | 模型默认值 | + +### Tool Calling (函数调用) + +`ClientAI` 支持 Tool Calling 功能,可以让 AI 调用你定义的工具函数。 + +#### 技术背景 + +底层 MLC Engine 原生的 Function Calling 功能**仅支持 Hermes 系列模型**,不支持 Qwen 等其他模型。 + +我们通过分析 Qwen 的训练过程和 tokenizer config,在 MLC Engine 下为 **Qwen 系列模型扩展了 Tool Calling 支持**。这使得轻量级的 Qwen 模型(如 1.7B)也能在浏览器端实现工具调用能力。 + +**使用提示**: +- 尝试问 AI "北京今天天气怎么样?" +- 尝试问 AI "帮我计算 123 * 456" +- 尝试问 AI "现在几点了?" 
+ +```jsx live=true dir="column" noInline=true +import React from 'react'; +import { ClientAI, Toast } from '@douyinfe/semi-ui'; + +function ToolCallingDemo() { + const engineConfig = ClientAI.Qwen3_1_7B_EngineConfigCN; + const modelId = engineConfig.appConfig.model_list[0].model_id; + + // 定义可用的工具 + const tools = [ + { + type: 'function', + function: { + name: 'get_weather', + description: '获取指定城市的当前天气信息', + parameters: { + type: 'object', + properties: { + city: { + type: 'string', + description: '城市名称,如:北京、上海、深圳' + } + }, + required: ['city'] + } + } + }, + { + type: 'function', + function: { + name: 'calculate', + description: '执行数学计算', + parameters: { + type: 'object', + properties: { + expression: { + type: 'string', + description: '数学表达式,如:2+2, 10*5, 100/4' + } + }, + required: ['expression'] + } + } + }, + { + type: 'function', + function: { + name: 'get_current_time', + description: '获取当前时间', + parameters: { + type: 'object', + properties: {}, + required: [] + } + } + } + ]; + + // 处理 Tool Call + // 组件会自动调用此函数,等待返回结果后自动发送结果继续对话 + const handleToolCall = async (toolCalls, rawOutput) => { + console.log('Received tool calls:', toolCalls); + + // 模拟天气数据 + const weatherData = { + '北京': { temp: '5°C', weather: '晴', humidity: '30%' }, + '上海': { temp: '12°C', weather: '多云', humidity: '65%' }, + '深圳': { temp: '22°C', weather: '晴', humidity: '70%' }, + '广州': { temp: '20°C', weather: '阴', humidity: '75%' }, + }; + + // 执行所有 tool calls + return toolCalls.map((toolCall) => { + const { call_id, name, arguments: argsStr } = toolCall; + let result = ''; + + try { + const args = JSON.parse(argsStr || '{}'); + + if (name === 'get_weather') { + const city = args.city; + const data = weatherData[city]; + if (data) { + result = JSON.stringify({ + city, + temperature: data.temp, + weather: data.weather, + humidity: data.humidity + }); + } else { + result = JSON.stringify({ error: '未找到城市 ' + city + ' 的天气数据' }); + } + } else if (name === 'calculate') { + const expression = 
args.expression.replace(/[^0-9+\-*/().]/g, ''); + const calcResult = Function('"use strict"; return (' + expression + ')')(); + result = JSON.stringify({ expression: args.expression, result: calcResult }); + } else if (name === 'get_current_time') { + const now = new Date(); + result = JSON.stringify({ + time: now.toLocaleTimeString('zh-CN'), + date: now.toLocaleDateString('zh-CN'), + timestamp: now.getTime() + }); + } else { + result = JSON.stringify({ error: '未知的工具: ' + name }); + } + + Toast.success('工具 ' + name + ' 执行成功'); + return { call_id, name, arguments: argsStr, result, status: 'success' }; + } catch (e) { + Toast.error('工具 ' + name + ' 执行失败'); + return { call_id, name, arguments: argsStr, result: JSON.stringify({ error: e.message }), status: 'error' }; + } + }); + }; + + return ( + Toast.error(error.message)} + /> + ); +} + +render(); +``` + +**注意事项**: +1. Tool Calling 目前仅支持 Qwen 系列模型 +2. 使用 `handleToolCall` prop,组件会自动处理工具调用和结果发送,无需手动调用 `sendToolResults` +3. `handleToolCall` 返回 `Promise` 或 `ToolCallResult[]`,组件会自动发送结果继续对话 +4. 
工具定义遵循 OpenAI Function Calling 的格式规范 + +### 深度思考 CoT + +`ClientAI` 支持深度思考 CoT 开关,当开启时 AI 会进行更深入的推理思考(显示思考过程),当关闭时会添加 `/no_think` 标签让模型跳过思考过程直接回答。 + +```jsx live=true dir="column" noInline=true +import React from 'react'; +import { ClientAI } from '@douyinfe/semi-ui'; + +function DeepThinkDemo() { + const engineConfig = ClientAI.Qwen3_1_7B_EngineConfigCN; + const modelId = engineConfig.appConfig.model_list[0].model_id; + + return ( + + ); +} + +render(); +``` + +**使用说明**: +- `showDeepThinkButton` 设为 `true` 时,输入框左下角会显示"深度思考 CoT"按钮 +- 点击按钮可以切换深度思考 CoT 的开启/关闭状态 +- 开启时:AI 会输出 `...` 标签包裹的思考过程 +- 关闭时:消息末尾会自动添加 `/no_think` 标签,让 Qwen3 模型跳过思考 + +### 自定义渲染 + +如果你需要完全自定义 UI(使用自己的消息列表和输入框样式),可以使用 `render` prop。传入 `render` 函数后,组件将不再渲染默认的 `AIChatDialogue` 和 `AIChatInput`,而是调用你的渲染函数。 + +你也可以选择不渲染任何 UI,直接通过 `render` prop 返回 `null`,然后通过 `sendMessage` 方法调用 AI 能力。这种方式适用于搜索查询改写、文本预审核等用户对 AI 无感知、不需要交互的场景。 + +```jsx live=true dir="column" noInline=true +import React, { useState, useRef } from 'react'; +import { ClientAI, Button, Input, Spin } from '@douyinfe/semi-ui'; + +// 自定义渲染的内容组件 +function CustomContent(props) { + const { + loading, + error, + messages, + isGenerating, + enableDeepThink, + sendMessage, + stopGenerate, + clearMessages, + setEnableDeepThink + } = props; + + const [inputValue, setInputValue] = useState(''); + + if (loading) { + return ( +
+      <div style={{ display: 'flex', flexDirection: 'column', alignItems: 'center', justifyContent: 'center', height: 300 }}>
+        <Spin size="large" />
+        <div style={{ marginTop: 12 }}>正在加载模型...</div>
+      </div>
+ ); + } + + if (error) { + return
<div style={{ color: 'var(--semi-color-danger)', padding: 24 }}>错误: {error}</div>
; + } + + // 渲染消息内容的辅助函数 + const renderMessageContent = (content) => { + if (typeof content === 'string') { + return content; + } + if (Array.isArray(content)) { + return content.map((item, i) => { + // 处理 chatInputToMessage 返回的嵌套结构(用户消息) + if (item.type === 'message' && Array.isArray(item.content)) { + return item.content.map((subItem, j) => ( + {subItem.text || ''} + )); + } + // 处理深度思考 CoT 内容(reasoning 类型) + if (item.type === 'reasoning' && Array.isArray(item.summary)) { + const thinkText = item.summary.map(s => s.text).join(''); + return ( +
+          <div key={`reasoning-${i}`} style={{ color: 'var(--semi-color-text-2)', fontSize: 12, marginBottom: 8 }}>
+            💭 {thinkText}
+            {item.status === 'in_progress' && <span>...</span>}
+          </div>
+ ); + } + // 处理普通文本 + return {item.text || ''}; + }); + } + return JSON.stringify(content); + }; + + const handleSend = () => { + if (inputValue.trim()) { + sendMessage(inputValue); + setInputValue(''); + } + }; + + return ( +
+    <div style={{ display: 'flex', flexDirection: 'column', border: '1px solid var(--semi-color-border)', borderRadius: 8 }}>
+      {/* 自定义消息列表 */}
+      <div style={{ height: 320, overflowY: 'auto', padding: 16 }}>
+        {messages.length === 0 ? (
+          <div style={{ color: 'var(--semi-color-text-2)', textAlign: 'center' }}>
+            暂无消息,开始对话吧!
+          </div>
+        ) : (
+          messages.map((msg) => (
+            <div key={msg.id} style={{ marginBottom: 12 }}>
+              <div style={{ fontWeight: 600, marginBottom: 4 }}>
+                {msg.role === 'user' ? '用户' : 'AI'}
+              </div>
+              <div style={{ whiteSpace: 'pre-wrap' }}>
+                {renderMessageContent(msg.content)}
+                {msg.status === 'in_progress' && <span>|</span>}
+              </div>
+            </div>
+          ))
+        )}
+      </div>
+
+      {/* 自定义输入区域 */}
+      <div style={{ display: 'flex', gap: 8, padding: 12, borderTop: '1px solid var(--semi-color-border)' }}>
+        <Input
+          value={inputValue}
+          onChange={setInputValue}
+          onEnterPress={handleSend}
+          placeholder="输入消息..."
+        />
+        {isGenerating ? (
+          <Button onClick={stopGenerate}>停止</Button>
+        ) : (
+          <Button theme="solid" onClick={handleSend}>发送</Button>
+        )}
+        <Button onClick={clearMessages}>清空</Button>
+      </div>
+    </div>
+ ); +} + +function CustomRenderDemo() { + const engineConfig = ClientAI.Qwen3_1_7B_EngineConfigCN; + const modelId = engineConfig.appConfig.model_list[0].model_id; + + return ( + } + /> + ); +} + +render(); +``` + +### 使用 Qwen3-4B 模型 + +`ClientAI` 还提供了 Qwen3-4B 模型的配置,相比 1.7B 模型具有更强的能力。适合对模型能力要求更高的场景,特别是需要更多世界知识的场景。 + +```jsx +import React from 'react'; +import { ClientAI } from '@douyinfe/semi-ui'; + +function Qwen4B() { + // 使用 Qwen3-4B 模型配置(中国大陆用户) + const engineConfig = ClientAI.Qwen3_4B_EngineConfigCN; + const modelId = engineConfig.appConfig.model_list[0].model_id; + + return ( + + ); +} +``` + +**模型选择建议**: +- **Qwen3-1.7B**:轻量级模型,适合大多数场景 +- **Qwen3-4B**:能力更强的模型,适合对模型能力要求更高的场景,特别是需要更多世界知识的场景 + +**ClientAIRenderProps 参数说明**: + +| 属性 | 说明 | 类型 | +|------|------|------| +| loading | 是否正在加载模型 | `boolean` | +| error | 错误信息 | `string \| null` | +| messages | 消息列表 | `Message[]` | +| isGenerating | 是否正在生成回复 | `boolean` | +| enableDeepThink | 是否启用深度思考 CoT | `boolean` | +| sendMessage | 发送消息 | `(text: string) => Promise` | +| stopGenerate | 停止生成 | `() => void` | +| clearMessages | 清空消息 | `() => void` | +| setEnableDeepThink | 设置深度思考 CoT 开关 | `(enable: boolean) => void` | +| sendToolResults | 发送 Tool 执行结果 | `(results: ToolCallResult[]) => Promise` | + +### 使用 Worker + +`ClientAI` 默认在主线程运行模型。如果你不提供 `worker.url`,模型会在主线程运行。在主线程运行模型可能会阻塞 UI,建议切换到 Worker 模式以获得更好的性能。 + +要切换到 Worker 模式以避免阻塞主线程,需要完成以下两个步骤: + +**步骤 1:创建 Worker 文件** + +创建一个 Worker 文件(例如 `worker.ts` 或 `worker.js`): + +```typescript +// worker.ts +import { WebWorkerMLCEngineHandler } from '@mlc-ai/web-llm'; + +const handler = new WebWorkerMLCEngineHandler(); + +// 处理来自主线程的消息 +self.onmessage = (msg: MessageEvent) => { + handler.onmessage(msg); +}; +``` + +**Worker 文件说明**: +- `WebWorkerMLCEngineHandler` 负责处理与主线程的通信,并处理传入的请求 +- Worker 线程会接收消息并使用隐藏的引擎处理实际计算,然后通过消息将结果返回给主线程 +- Worker 文件需要使用 ES Module 格式(`type: 'module'`) +- MLC Engine 相关依赖已包含在组件库中,无需额外安装 + +**步骤 2:在组件中配置 Worker URL** + +在组件中,通过 `worker` prop 传入 Worker 
文件的 URL: + +```jsx +import React from 'react'; +import { ClientAI } from '@douyinfe/semi-ui'; + +function WithWorker() { + const engineConfig = ClientAI.Qwen3_1_7B_EngineConfigCN; + const modelId = engineConfig.appConfig.model_list[0].model_id; + + return ( + + ); +} +``` + +**配置说明**: +- `worker.url`:**必填**,Worker 文件的 URL。必须提供此参数才会切换到 Worker 模式 +- `worker.enabled`:是否启用 Worker,默认为 `true`。设置为 `false` 时会在主线程运行 +- **重要**:只有同时提供 `worker.url` 且 `worker.enabled !== false` 时才会使用 Worker 模式,否则默认在主线程运行 + +**使用打包工具**: + +**Vite**: +```typescript +worker: { + url: new URL('./worker.ts', import.meta.url).href, + enabled: true, +} +``` + +**Webpack**: +```typescript +// 需要安装 worker-loader 或使用 Webpack 5 的 Worker 支持 +import Worker from './worker.ts?worker'; + +worker: { + url: Worker, + enabled: true, +} +``` + +**注意事项**: +- Worker 文件需要从 `@mlc-ai/web-llm` 导入 `WebWorkerMLCEngineHandler` +- 确保打包工具正确配置了 Worker 支持 +- Worker 文件必须使用 ES Module 格式(`type: 'module'`) + +### 修改用户输入 + +`onUserMessage` 回调可以在用户消息发送前修改输入内容,修改后的内容将同时用于显示和发送给AI: + +```jsx live=true dir="column" noInline=true +import React from 'react'; +import { ClientAI } from '@douyinfe/semi-ui'; + +function OnUserMessageExample() { + const engineConfig = ClientAI.Qwen3_1_7B_EngineConfigCN; + const modelId = engineConfig.appConfig.model_list[0].model_id; + + return ( + { + // 自动添加前缀 + return `请用简洁的语言回答:${userContent}`; + }} + /> + ); +} + +render(); +``` + +### 拦截AI调用 + +`beforeAIInput` 回调可以在AI调用前返回自定义回复,如果返回非空字符串,将跳过AI调用直接使用该回复: + +```jsx live=true dir="column" noInline=true +import React from 'react'; +import { ClientAI } from '@douyinfe/semi-ui'; + +function BeforeAIInputExample() { + const engineConfig = ClientAI.Qwen3_1_7B_EngineConfigCN; + const modelId = engineConfig.appConfig.model_list[0].model_id; + + return ( + { + const lastMessage = messages[messages.length - 1]; + // 如果用户问的是"你好",直接返回固定回复 + if (lastMessage && lastMessage.content && lastMessage.content.includes('你好')) { + return '你好!我是AI助手,很高兴为您服务。'; + } + // 
返回空字符串,正常调用AI + return ''; + }} + /> + ); +} + +render(); +``` + +### 修改AI回复 + +`afterAIInput` 回调可以在AI回复后修改回复内容: + +```jsx live=true dir="column" noInline=true +import React from 'react'; +import { ClientAI } from '@douyinfe/semi-ui'; + +function AfterAIInputExample() { + const engineConfig = ClientAI.Qwen3_1_7B_EngineConfigCN; + const modelId = engineConfig.appConfig.model_list[0].model_id; + + return ( + { + // 在AI回复前添加提示 + return `[AI回复] ${aiContent}`; + }} + /> + ); +} + +render(); +``` + +### 控制流式显示 + +`stream` 参数控制是否流式显示AI回复。当设置为 `false` 时,会等待流式返回完毕后才一次性显示: + +```jsx live=true dir="column" noInline=true +import React from 'react'; +import { ClientAI } from '@douyinfe/semi-ui'; + +function StreamExample() { + const engineConfig = ClientAI.Qwen3_1_7B_EngineConfigCN; + const modelId = engineConfig.appConfig.model_list[0].model_id; + + return ( + + ); +} + +render(); +``` + +### 设置默认对话消息 + +`defaultMessages` 用于设置初始的对话历史,组件加载时会显示这些消息: + +```jsx live=true dir="column" noInline=true +import React from 'react'; +import { ClientAI } from '@douyinfe/semi-ui'; + +function DefaultMessagesExample() { + const engineConfig = ClientAI.Qwen3_1_7B_EngineConfigCN; + const modelId = engineConfig.appConfig.model_list[0].model_id; + + const defaultMessages = [ + { + id: 'msg-1', + role: 'user', + content: '你好,请介绍一下你自己', + createdAt: Date.now() - 60000, + status: 'completed', + }, + { + id: 'msg-2', + role: 'assistant', + content: '你好!我是AI助手,很高兴为您服务。我可以帮助您解答问题、提供信息和建议。', + createdAt: Date.now() - 30000, + status: 'completed', + }, + ]; + + return ( + + ); +} + +render(); +``` + +## API 参考 + +### ClientAI + +| 属性 | 说明 | 类型 | 默认值 | +|------|------|------|--------| +| afterAIInput | AI回复后的回调,可以修改AI的回复内容 | `(aiContent: string, messages: Message[]) => string \| Promise` | - | +| beforeAIInput | AI回复前的回调,可以拦截AI调用并返回自定义回复。返回非空字符串将作为AI回复,返回空字符串则正常调用AI | `(messages: Message[]) => string \| Promise` | - | +| chatOpts | 聊天选项,可配置 tools 等。覆盖模型默认配置 | `ChatOptions \| ChatOptions[]` | - | +| 
className | 自定义类名 | `string` | - | +| defaultEnableDeepThink | 深度思考 CoT 默认状态 | `boolean` | `true` | +| defaultMessages | 默认对话消息,用于设置初始的对话历史 | `Message[]` | - | +| dialogueProps | AIChatDialogue 的透传 props | `Partial` | - | +| engineConfig | 引擎配置,**必填**。可使用 `ClientAI.Qwen3_1_7B_EngineConfig` / `ClientAI.Qwen3_1_7B_EngineConfigCN`(1.7B 模型)或 `ClientAI.Qwen3_4B_EngineConfig` / `ClientAI.Qwen3_4B_EngineConfigCN`(4B 模型)。也可选择自部署模型文件 | `MLCEngineConfig` | - | +| inputProps | AIChatInput 的透传 props | `Partial` | - | +| modelId | 模型 ID,**必填**。可从引擎配置中获取,如:`engineConfig.appConfig.model_list[0].model_id` | `string \| string[]` | - | +| handleToolCall | Tool 调用处理函数,组件会自动调用此函数并等待返回结果,然后自动发送结果继续对话 | `(toolCalls: ToolCall[], rawOutput: string) => Promise \| ToolCallResult[]` | - | +| onError | 错误回调 | `(error: Error) => void` | - | +| onToolCall | Tool 调用回调,当 AI 输出包含 tool_call 时触发(仅用于通知,需要手动调用 sendToolResults) | `(toolCalls: ToolCall[], rawOutput: string) => void` | - | +| onUserMessage | 用户消息发送前的回调,可以修改用户输入内容。返回的字符串将同时用于显示和发送给AI | `(userContent: string, messages: Message[]) => string` | - | +| render | 自定义渲染函数,传入后将完全由用户控制 UI 渲染 | `(props: ClientAIRenderProps) => ReactNode` | - | +| roleConfig | 角色配置,用于配置用户、助手、系统等角色的名称和头像等信息 | `RoleConfig` | `{ user: { name: '用户' }, assistant: { name: 'AI 助手' }, system: { name: '系统' } }`(默认不包含 avatar) | +| showDeepThinkButton | 是否显示深度思考 CoT 按钮 | `boolean` | `false` | +| stream | 控制是否流式显示AI回复。当为 `false` 时,等待流式返回完毕后才一次性显示 | `boolean` | `true` | +| style | 自定义样式 | `React.CSSProperties` | - | +| systemPrompt | 系统提示词 | `string` | 根据浏览器语言动态设置:中文环境为 `'你是一个有用的 AI 助手。使用中文回复用户。'`,其他语言为 `'You are a helpful AI assistant. 
Reply to users in English.'` | +| worker | Worker 配置 | `{ url?: string; enabled?: boolean }` | `{ enabled: true }`(默认在主线程运行,需要提供 `url` 才会使用 Worker) | + +### 方法 + +通过 `ref` 可以调用以下方法: + +| 方法 | 说明 | 参数 | +|------|------|------| +| sendToolResults | 发送 Tool 执行结果,让 AI 继续对话(通常不需要手动调用,推荐使用 `handleToolCall` prop) | `(toolResults: ToolCallResult[]) => Promise` | + +### ToolCall 类型 + +```typescript +interface ToolCall { + type: 'function_call'; + call_id: string; + name: string; + arguments: string; + status: string; +} +``` + +### ToolCallResult 类型 + +```typescript +interface ToolCallResult { + call_id: string; + name: string; + arguments: string; + result: string; + status: 'success' | 'error'; +} +``` + +### 静态属性 + +`ClientAI` 组件提供了以下静态属性,你需要从中获取引擎配置并传入: + +**Qwen3-1.7B 模型**(轻量级): +- `ClientAI.Qwen3_1_7B_EngineConfig` - 国际用户引擎配置(使用 Hugging Face + GitHub Raw) +- `ClientAI.Qwen3_1_7B_EngineConfigCN` - 中国大陆用户引擎配置(使用 ModelScope + jsDelivr CDN) + +**Qwen3-4B 模型**(能力更强): +- `ClientAI.Qwen3_4B_EngineConfig` - 国际用户引擎配置(使用 Hugging Face + GitHub Raw) +- `ClientAI.Qwen3_4B_EngineConfigCN` - 中国大陆用户引擎配置(使用 ModelScope + jsDelivr CDN) + +> 💡 **提示**:你可以选择将模型文件下载到自己的 CDN 或 OSS 上,然后自定义配置指向自己的地址。参考 [自部署模型文件](#自部署模型文件) 章节。 + +### 类型说明 + +`ClientAI` 组件使用的类型(如 `MLCEngineConfig`、`ChatOptions`、`AppConfig` 等)都从 `@douyinfe/semi-foundation/clientAI/interface` 重新导出。这些类型与 MLC Engine SDK 保持一致。你可以查阅 [MLC Engine 文档](https://github.com/mlc-ai/mlc-llm) 了解这些类型的详细说明。 + +你可以从 `@douyinfe/semi-foundation/clientAI/interface` 导入这些类型: + +```typescript +import type { + MLCEngineConfig, + ChatOptions, + AppConfig, + WebWorkerMLCEngine, + // ... 其他类型 +} from '@douyinfe/semi-foundation/clientAI/interface'; +``` + +## 注意事项 + +1. **Worker 文件实现**:使用 Worker 模式时,需要创建 Worker 文件。Worker 文件需要导入 `WebWorkerMLCEngineHandler` 并处理消息。参考上面的 [使用 Worker](#使用-worker) 部分了解详细实现。 + +2. **模型加载时间**:首次加载模型可能需要较长时间,建议显示加载状态给用户。组件会自动显示加载进度条。 + +3. **浏览器兼容性**:MLC Engine 需要浏览器支持 WebGPU,请确保目标浏览器支持该特性。 + +4. 
**内存使用**:运行 AI 模型会消耗较多内存,建议在内存充足的设备上使用。 + +5. **模型文件大小**:模型文件可能较大,首次下载需要时间,建议使用 IndexedDB 缓存。 + +6. **类型导入**:所有 MLC Engine 相关的类型都可以从 `@douyinfe/semi-foundation/clientAI/interface` 导入,这些类型与 MLC Engine SDK 保持一致,方便查阅文档。 + +7. **Tool Calling 支持**:底层 MLC Engine 原生仅支持 Hermes 系列模型的 Function Calling。我们通过分析 Qwen 的训练过程和 tokenizer config,在 MLC Engine 下扩展支持了 Qwen 系列模型。 + +8. **支持 Tool Calling 的模型**: + - ✅ Qwen 系列模型(Qwen3-0.6B、Qwen3-1.7B、Qwen3-4B 等) + - ✅ Hermes 系列模型(MLC 原生支持) + diff --git a/content/order.js b/content/order.js index dc298e7a3b..b07c7f804f 100644 --- a/content/order.js +++ b/content/order.js @@ -98,6 +98,7 @@ const order = [ 'audioPlayer', 'videoPlayer', 'aiComponent', + 'clientAI', 'aiChatInput', 'aiChatDialogue', ]; diff --git a/ecosystem/semi-mcp/.gitignore b/ecosystem/semi-mcp/.gitignore new file mode 100644 index 0000000000..b538abcd17 --- /dev/null +++ b/ecosystem/semi-mcp/.gitignore @@ -0,0 +1,14 @@ +# Local +.DS_Store +*.local +*.log* + +# Dist +node_modules +dist/ +storybook-static + +# IDE +.vscode/* +!.vscode/extensions.json +.idea diff --git a/ecosystem/semi-mcp/AGENTS.md b/ecosystem/semi-mcp/AGENTS.md new file mode 100644 index 0000000000..b1bcf06cb3 --- /dev/null +++ b/ecosystem/semi-mcp/AGENTS.md @@ -0,0 +1,20 @@ +# AGENTS.md + +You are an expert in JavaScript, Rspack, Rsbuild, Rslib, and library development. You write maintainable, performant, and accessible code. 
+ +## Commands + +- `npm run build` - Build the library for production +- `npm run dev` - Turn on watch mode, watch for changes and rebuild the library + +## Docs + +- Rslib: https://rslib.rs/llms.txt +- Rsbuild: https://rsbuild.rs/llms.txt +- Rspack: https://rspack.rs/llms.txt + +## Tools + +### Rstest + +- Run `npm run test` to test your code diff --git a/ecosystem/semi-mcp/README-zh_CN.md b/ecosystem/semi-mcp/README-zh_CN.md new file mode 100644 index 0000000000..f4e08c06ac --- /dev/null +++ b/ecosystem/semi-mcp/README-zh_CN.md @@ -0,0 +1,228 @@ +[中文](README-zh_CN.md) | [English](README.md) + +# Semi MCP Server + +基于 Model Context Protocol (MCP) SDK 实现的 MCP 服务器,提供 Semi Design 组件文档和组件列表查询功能。 + +## 简介 + +Semi MCP Server 是一个 MCP (Model Context Protocol) 服务器,通过 stdio 传输层与支持 MCP 协议的客户端通信。它提供了获取 Semi Design 组件文档、组件列表等功能。 + +## 安装 + +### 全局安装 + +```bash +npm install -g @douyinfe/semi-mcp +``` + +### 本地安装 + +```bash +npm install @douyinfe/semi-mcp +``` + +## 使用方法 + +### 作为命令行工具 + +全局安装后,可以直接使用: + +```bash +semi-mcp +``` + +### 在 MCP 客户端中配置 + +在支持 MCP 的客户端(如 Claude Desktop)中配置: + +```json +{ + "mcpServers": { + "semi-mcp": { + "command": "npx", + "args": ["-y", "@douyinfe/semi-mcp"] + } + } +} +``` + +或者如果已全局安装: + +```json +{ + "mcpServers": { + "semi-mcp": { + "command": "semi-mcp" + } + } +} +``` + +## 功能 + +### 工具 (Tools) + +#### `get_semi_document` + +获取 Semi Design 组件文档或组件列表。 + +**参数:** +- `componentName` (可选): 组件名称,例如 `Button`、`Input` 等。如果不提供,则返回组件列表 +- `version` (可选): 版本号,例如 `2.89.2-alpha.3`。如果不提供,默认使用 `latest` +- `get_path` (可选): 如果为 `true`,将文档写入操作系统临时目录并返回路径,而不是在响应中返回文档内容。默认为 `false` + +**示例:** + +获取组件列表: +```json +{ + "name": "get_semi_document" +} +``` + +获取指定组件文档: +```json +{ + "name": "get_semi_document", + "arguments": { + "componentName": "Button", + "version": "2.89.2-alpha.3" + } +} +``` + +**返回格式:** + +所有响应均以纯文本形式返回,方便 AI 直接阅读。 + +获取组件列表时: +``` +Semi Design 组件列表 (版本 2.89.2-alpha.3),共 70 个组件: + +button, input, select, table, ... 
+``` + +获取小型组件文档时(< 888 行): +``` +===== index.md ===== + +--- +title: Button +... +--- + +## 使用方法 +... + +===== index-en-US.md ===== + +--- +title: Button +... +--- + +## Usage +... +``` + +获取大型组件文档时(> 888 行),工具会自动保存到临时目录: +``` +组件 Table (版本 2.89.2-alpha.3) 文档较大,已保存到临时目录。 + +文档文件列表: + - /tmp/semi-docs-table-2.89.2-alpha.3-1234567890/index.md (6,055 行) + - /tmp/semi-docs-table-2.89.2-alpha.3-1234567890/index-en-US.md (5,660 行) + +请使用文件读取工具查看文档内容。 +``` + +### 资源 (Resources) + +#### `semi://components` + +Semi Design 组件列表资源。 + +## 开发 + +### 环境要求 + +- Node.js >= 18.0.0 +- npm 或 yarn + +### 安装依赖 + +```bash +npm install +``` + +### 构建 + +构建生产版本: + +```bash +npm run build +``` + +开发模式(监听文件变化并自动重建): + +```bash +npm run dev +``` + +### 测试 + +运行测试: + +```bash +npm test +``` + +### 运行 + +构建完成后运行服务器: + +```bash +npm start +``` + +或者直接运行构建后的文件: + +```bash +node dist/index.js +``` + +## 技术栈 + +- **TypeScript**: 类型安全的 JavaScript +- **Rslib**: 快速构建工具 +- **@modelcontextprotocol/sdk**: MCP 官方 SDK + +## 项目结构 + +``` +semi-mcp/ +├── src/ +│ ├── index.ts # 主入口文件 +│ ├── tools/ # 工具定义 +│ │ ├── index.ts +│ │ └── get-semi-document.ts +│ └── utils/ # 工具函数 +│ ├── fetch-directory-list.ts +│ ├── fetch-file-content.ts +│ └── get-component-list.ts +├── tests/ # 测试文件 +│ └── get-semi-document.test.ts +├── dist/ # 构建输出 +├── package.json +└── README.md +``` + +## 许可证 + +MIT + +## 相关链接 + +- [Semi Design 官网](https://semi.design) +- [Model Context Protocol 文档](https://modelcontextprotocol.io) diff --git a/ecosystem/semi-mcp/README.md b/ecosystem/semi-mcp/README.md new file mode 100644 index 0000000000..670fc96890 --- /dev/null +++ b/ecosystem/semi-mcp/README.md @@ -0,0 +1,228 @@ +[中文](README-zh_CN.md) | [English](README.md) + +# Semi MCP Server + +An MCP (Model Context Protocol) server implementation based on the MCP SDK, providing Semi Design component documentation and component list query functionality. 
+ +## Introduction + +Semi MCP Server is an MCP (Model Context Protocol) server that communicates with MCP-compatible clients through stdio transport. It provides functionality to fetch Semi Design component documentation, component lists, and more. + +## Installation + +### Global Installation + +```bash +npm install -g @douyinfe/semi-mcp +``` + +### Local Installation + +```bash +npm install @douyinfe/semi-mcp +``` + +## Usage + +### As a Command Line Tool + +After global installation, you can use it directly: + +```bash +semi-mcp +``` + +### Configuration in MCP Clients + +Configure in MCP-compatible clients (such as Claude Desktop): + +```json +{ + "mcpServers": { + "semi-mcp": { + "command": "npx", + "args": ["-y", "@douyinfe/semi-mcp"] + } + } +} +``` + +Or if installed globally: + +```json +{ + "mcpServers": { + "semi-mcp": { + "command": "semi-mcp" + } + } +} +``` + +## Features + +### Tools + +#### `get_semi_document` + +Get Semi Design component documentation or component list. + +**Parameters:** +- `componentName` (optional): Component name, e.g., `Button`, `Input`, etc. If not provided, returns the component list +- `version` (optional): Version number, e.g., `2.89.2-alpha.3`. If not provided, defaults to `latest` +- `get_path` (optional): If `true`, saves documents to the system temporary directory and returns the path instead of returning document content in the response. Defaults to `false` + +**Examples:** + +Get component list: +```json +{ + "name": "get_semi_document" +} +``` + +Get specific component documentation: +```json +{ + "name": "get_semi_document", + "arguments": { + "componentName": "Button", + "version": "2.89.2-alpha.3" + } +} +``` + +**Response Format:** + +All responses are returned as plain text for AI-friendly consumption. + +When getting component list: +``` +Semi Design 组件列表 (版本 2.89.2-alpha.3),共 70 个组件: + +button, input, select, table, ... 
+``` + +When getting small component documentation (< 888 lines): +``` +===== index.md ===== + +--- +title: Button +... +--- + +## Usage +... + +===== index-en-US.md ===== + +--- +title: Button +... +--- + +## Usage +... +``` + +When getting large component documentation (> 888 lines), the tool automatically saves to temp directory: +``` +组件 Table (版本 2.89.2-alpha.3) 文档较大,已保存到临时目录。 + +文档文件列表: + - /tmp/semi-docs-table-2.89.2-alpha.3-1234567890/index.md (6,055 行) + - /tmp/semi-docs-table-2.89.2-alpha.3-1234567890/index-en-US.md (5,660 行) + +请使用文件读取工具查看文档内容。 +``` + +### Resources + +#### `semi://components` + +Semi Design component list resource. + +## Development + +### Requirements + +- Node.js >= 18.0.0 +- npm or yarn + +### Install Dependencies + +```bash +npm install +``` + +### Build + +Build production version: + +```bash +npm run build +``` + +Development mode (watch for file changes and auto-rebuild): + +```bash +npm run dev +``` + +### Test + +Run tests: + +```bash +npm test +``` + +### Run + +Run the server after building: + +```bash +npm start +``` + +Or run the built file directly: + +```bash +node dist/index.js +``` + +## Tech Stack + +- **TypeScript**: Type-safe JavaScript +- **Rslib**: Fast build tool +- **@modelcontextprotocol/sdk**: Official MCP SDK + +## Project Structure + +``` +semi-mcp/ +├── src/ +│ ├── index.ts # Main entry file +│ ├── tools/ # Tool definitions +│ │ ├── index.ts +│ │ └── get-semi-document.ts +│ └── utils/ # Utility functions +│ ├── fetch-directory-list.ts +│ ├── fetch-file-content.ts +│ └── get-component-list.ts +├── tests/ # Test files +│ └── get-semi-document.test.ts +├── dist/ # Build output +├── package.json +└── README.md +``` + +## License + +MIT + +## Related Links + +- [Semi Design Official Website](https://semi.design) +- [Model Context Protocol Documentation](https://modelcontextprotocol.io) diff --git a/ecosystem/semi-mcp/package.json b/ecosystem/semi-mcp/package.json new file mode 100644 index 0000000000..c7aa2e172d 
--- /dev/null +++ b/ecosystem/semi-mcp/package.json @@ -0,0 +1,67 @@ +{ + "name": "@douyinfe/semi-mcp", + "version": "1.0.9", + "description": "Semi Design MCP Server - Model Context Protocol server for Semi Design components and documentation", + "type": "module", + "main": "./dist/index.js", + "module": "./dist/browser.js", + "browser": "./dist/browser.js", + "bin": { + "semi-mcp": "./dist/index.js" + }, + "exports": { + ".": { + "types": "./dist/index.d.ts", + "import": "./dist/index.js" + }, + "./browser": { + "types": "./dist/browser.d.ts", + "browser": "./dist/browser.js", + "import": "./dist/browser.js" + }, + "./utils": { + "types": "./dist/browser.d.ts", + "browser": "./dist/browser.js", + "import": "./dist/browser.js" + } + }, + "types": "./dist/index.d.ts", + "files": [ + "dist" + ], + "engines": { + "node": ">=18.0.0" + }, + "sideEffects": false, + "keywords": [ + "semi", + "semi-design", + "mcp", + "model-context-protocol", + "design-system", + "component-library", + "documentation" + ], + "author": "", + "license": "MIT", + "publishConfig": { + "access": "public" + }, + "scripts": { + "build": "rslib build", + "dev": "rslib build --watch", + "test": "rstest", + "start": "node dist/index.js", + "prepublishOnly": "npm run build && npm test" + }, + "dependencies": { + "@modelcontextprotocol/sdk": "^1.0.4" + }, + "devDependencies": { + "@rslib/core": "^0.18.5", + "@rstest/core": "^0.7.2", + "@types/node": "^24.10.4", + "tsx": "^4.19.2", + "typescript": "^5.9.3" + } +} diff --git a/ecosystem/semi-mcp/rslib.config.ts b/ecosystem/semi-mcp/rslib.config.ts new file mode 100644 index 0000000000..5498cc3484 --- /dev/null +++ b/ecosystem/semi-mcp/rslib.config.ts @@ -0,0 +1,38 @@ +import { defineConfig } from '@rslib/core'; + +export default defineConfig({ + lib: [ + // MCP Server 入口 - 仅 Node.js 环境 + { + source: { + entry: { + index: './src/index.ts', + }, + }, + format: 'esm', + syntax: ['node 18'], + dts: true, + output: { + distPath: { + root: './dist', + }, + 
}, + }, + // 浏览器兼容入口 - 可在浏览器和 Node.js 中运行 + { + source: { + entry: { + browser: './src/browser.ts', + }, + }, + format: 'esm', + syntax: ['es2020'], + dts: true, + output: { + distPath: { + root: './dist', + }, + }, + }, + ], +}); diff --git a/ecosystem/semi-mcp/rstest.config.ts b/ecosystem/semi-mcp/rstest.config.ts new file mode 100644 index 0000000000..9ee3cbabc1 --- /dev/null +++ b/ecosystem/semi-mcp/rstest.config.ts @@ -0,0 +1,3 @@ +import { defineConfig } from '@rstest/core'; + +export default defineConfig({}); diff --git a/ecosystem/semi-mcp/src/browser.ts b/ecosystem/semi-mcp/src/browser.ts new file mode 100644 index 0000000000..bb0bd42259 --- /dev/null +++ b/ecosystem/semi-mcp/src/browser.ts @@ -0,0 +1,105 @@ +/** + * Semi MCP Browser Entry + * + * 这个入口导出所有可以在浏览器环境中运行的原子功能 + * 这些功能只使用 fetch API,不依赖 Node.js 特有的模块 + */ + +// ============ 核心工具函数 ============ + +// 获取目录列表 +export { + fetchDirectoryList, + fetchDirectoryListFromSource, + UNPKG_BASE_URL, + NPMMIRROR_BASE_URL, +} from './utils/fetch-directory-list.js'; + +// 获取文件内容 +export { + fetchFileContent, + fetchFileContentFromSource, + UNPKG_BASE_URL as FILE_UNPKG_BASE_URL, + NPMMIRROR_BASE_URL as FILE_NPMMIRROR_BASE_URL, +} from './utils/fetch-file-content.js'; + +// 获取组件列表 +export { getComponentList } from './utils/get-component-list.js'; + +// 获取组件文档 +export { + getComponentDocuments, + type ComponentDocument, + type ComponentDocumentsResult, +} from './utils/get-component-documents.js'; + +// ============ 便捷 API ============ + +/** + * 获取 Semi Design 组件文档 + * 这是一个便捷的高级 API,封装了获取文档的完整流程 + * + * @param componentName - 组件名称,如 'Button', 'Table' 等。如果不提供,返回组件列表 + * @param version - 版本号,如 '2.89.1'。默认为 'latest' + * @returns 文档内容或组件列表 + * + * @example + * // 获取组件列表 + * const components = await getSemiDocument(); + * console.log(components.components); // ['button', 'table', ...] 
+ * + * @example + * // 获取特定组件文档 + * const doc = await getSemiDocument('Button'); + * console.log(doc.documents[0].content); // 文档内容 + */ +export async function getSemiDocument( + componentName?: string, + version: string = 'latest' +): Promise< + | { type: 'list'; version: string; components: string[]; count: number } + | { type: 'document'; componentName: string; version: string; category: string; documents: Array<{ name: string; path: string; content: string }> } + | { type: 'error'; message: string } +> { + const { getComponentList } = await import('./utils/get-component-list.js'); + const { getComponentDocuments } = await import('./utils/get-component-documents.js'); + + try { + if (!componentName) { + // 返回组件列表 + const components = await getComponentList(version); + return { + type: 'list', + version, + components, + count: components.length, + }; + } else { + // 返回组件文档 + const result = await getComponentDocuments(componentName, version); + + if (!result) { + const allComponents = await getComponentList(version); + return { + type: 'error', + message: `未找到组件 "${componentName}" 的文档 (版本 ${version})。可用组件列表:${allComponents.join(', ')}`, + }; + } + + return { + type: 'document', + componentName, + version, + category: result.category, + documents: result.documents, + }; + } + } catch (error) { + const errorMessage = error instanceof Error ? 
error.message : String(error); + return { + type: 'error', + message: `获取文档失败: ${errorMessage}`, + }; + } +} + diff --git a/ecosystem/semi-mcp/src/index.ts b/ecosystem/semi-mcp/src/index.ts new file mode 100644 index 0000000000..0d14f38172 --- /dev/null +++ b/ecosystem/semi-mcp/src/index.ts @@ -0,0 +1,166 @@ +#!/usr/bin/env node + +import { Server } from '@modelcontextprotocol/sdk/server/index.js'; +import { StdioServerTransport } from '@modelcontextprotocol/sdk/server/stdio.js'; +import { + CallToolRequestSchema, + ListToolsRequestSchema, + ListResourcesRequestSchema, + ReadResourceRequestSchema, +} from '@modelcontextprotocol/sdk/types.js'; +import { readFileSync } from 'fs'; +import { fileURLToPath } from 'url'; +import { dirname, join } from 'path'; +import { tools, toolHandlers } from './tools/index.js'; +import { getComponentList } from './utils/get-component-list.js'; + +// 获取当前文件的目录路径 +const __filename = fileURLToPath(import.meta.url); +const __dirname = dirname(__filename); + +// 读取 package.json 获取版本号 +// 尝试多个可能的路径(开发环境和生产环境) +let packageJson: { version: string }; +try { + // 生产环境:dist/index.js -> ../package.json + const packageJsonPath = join(__dirname, '../package.json'); + packageJson = JSON.parse(readFileSync(packageJsonPath, 'utf-8')); +} catch { + try { + // 开发环境:src/index.ts -> ../../package.json + const packageJsonPath = join(__dirname, '../../package.json'); + packageJson = JSON.parse(readFileSync(packageJsonPath, 'utf-8')); + } catch { + // 如果都失败,使用默认版本号 + packageJson = { version: '1.0.0' }; + } +} + +/** + * Semi MCP Server + * 基于 Model Context Protocol SDK 实现的 MCP 服务器 + * 使用 stdio 作为传输层 + */ +async function main() { + // 创建 MCP 服务器实例 + const server = new Server( + { + name: 'semi-mcp', + version: packageJson.version, + }, + { + capabilities: { + tools: {}, + resources: {}, + }, + } + ); + + // 注册工具列表处理器 + server.setRequestHandler(ListToolsRequestSchema, async () => { + return { + tools, + }; + }); + + // 注册工具调用处理器 + 
server.setRequestHandler(CallToolRequestSchema, async (request) => { + const { name, arguments: args } = request.params; + + const handler = toolHandlers[name]; + if (!handler) { + throw new Error(`未知的工具: ${name}`); + } + + return handler(args || {}); + }); + + // 注册资源列表处理器 + server.setRequestHandler(ListResourcesRequestSchema, async () => { + return { + resources: [ + { + uri: 'semi://components', + name: 'Semi Components', + description: 'Semi Design 组件列表', + mimeType: 'application/json', + }, + ], + }; + }); + + // 注册资源读取处理器 + server.setRequestHandler(ReadResourceRequestSchema, async (request) => { + const { uri } = request.params; + + if (uri === 'semi://components' || uri.startsWith('semi://components')) { + // 默认使用 latest 版本,资源 URI 不支持查询参数,所以固定使用 latest + // 如果需要指定版本,应该使用工具 get_semi_document + const version = 'latest'; + + try { + const components = await getComponentList(version); + return { + contents: [ + { + uri, + mimeType: 'application/json', + text: JSON.stringify( + { + version, + components, + count: components.length, + description: 'Semi Design 组件列表', + note: '如需指定版本,请使用 get_semi_document 工具', + }, + null, + 2 + ), + }, + ], + }; + } catch (error) { + const errorMessage = error instanceof Error ? error.message : String(error); + return { + contents: [ + { + uri, + mimeType: 'application/json', + text: JSON.stringify( + { + version, + error: errorMessage, + components: [], + count: 0, + }, + null, + 2 + ), + }, + ], + }; + } + } + + throw new Error(`未知的资源 URI: ${uri}`); + }); + + // 创建 stdio 传输层 + const transport = new StdioServerTransport(); + + // 连接服务器到传输层 + await server.connect(transport); + + // 注意:不要在这里输出任何内容到 stdout/stderr,因为会干扰 JSON-RPC 通信 + // MCP 服务器通过 stdio 进行 JSON-RPC 通信,任何非 JSON 输出都会导致协议错误 +} + +// 启动服务器 +main().catch((error) => { + // 只在真正出错时输出错误信息,并确保格式正确 + const errorMessage = error instanceof Error ? 
error.message : String(error); + // 使用 stderr 输出错误,但要注意不要干扰 JSON-RPC 通信 + // 如果是在初始化之前出错,可以输出;如果是在运行中出错,应该通过 JSON-RPC 错误响应返回 + process.stderr.write(`Semi MCP Server 启动失败: ${errorMessage}\n`); + process.exit(1); +}); diff --git a/ecosystem/semi-mcp/src/tools/get-semi-document.ts b/ecosystem/semi-mcp/src/tools/get-semi-document.ts new file mode 100644 index 0000000000..dab0e3edfd --- /dev/null +++ b/ecosystem/semi-mcp/src/tools/get-semi-document.ts @@ -0,0 +1,165 @@ +import { CallToolResult } from '@modelcontextprotocol/sdk/types.js'; +import { Tool } from '@modelcontextprotocol/sdk/types.js'; +import { getComponentList } from '../utils/get-component-list.js'; +import { getComponentDocuments } from '../utils/get-component-documents.js'; +import { mkdir, writeFile } from 'fs/promises'; +import { join } from 'path'; +import { tmpdir } from 'os'; + +/** + * 工具定义:获取 Semi Design 组件文档 + */ +export const getSemiDocumentTool: Tool = { + name: 'get_semi_document', + description: '获取 Semi Design 组件文档或组件列表', + inputSchema: { + type: 'object', + properties: { + componentName: { + type: 'string', + description: '组件名称,例如 Button、Input 等。如果不提供,则返回组件列表', + }, + version: { + type: 'string', + description: '版本号,例如 2.89.1。如果不提供,默认使用 latest', + }, + get_path: { + type: 'boolean', + description: '如果为 true,将文档写入操作系统临时目录并返回路径,而不是在响应中返回文档内容。默认为 false', + default: false, + }, + }, + required: [], + }, +}; + +/** + * 工具处理器:处理 get_semi_document 工具调用 + */ +export async function handleGetSemiDocument( + args: Record +): Promise { + const componentName = args?.componentName as string | undefined; + const version = (args?.version as string | undefined) || 'latest'; + const getPath = (args?.get_path as boolean | undefined) || false; + + try { + if (!componentName) { + // 返回组件列表 + const components = await getComponentList(version); + + if (components.length === 0) { + return { + content: [ + { + type: 'text', + text: `未找到组件列表,请检查版本号 ${version} 是否正确`, + }, + ], + isError: true, + }; + } + + return { + 
content: [ + { + type: 'text', + text: `Semi Design 组件列表 (版本 ${version}),共 ${components.length} 个组件:\n\n${components.join(', ')}`, + }, + ], + }; + } else { + // 返回组件文档列表 + const result = await getComponentDocuments(componentName, version); + + // 获取全部组件列表 + const allComponents = await getComponentList(version); + + if (!result) { + return { + content: [ + { + type: 'text', + text: `未找到组件 "${componentName}" 的文档 (版本 ${version})。\n\n可用组件列表:${allComponents.join(', ')}`, + }, + ], + }; + } + + // 计算每个文档的行数 + const documentsWithLines = result.documents.map(doc => ({ + ...doc, + lines: doc.content.split('\n').length, + })); + + // 检查是否有文档行数大于 888,如果有则自动开启 get_path + // 但只有在用户没有明确设置 get_path 时才自动开启 + const hasLargeDocument = documentsWithLines.some(doc => doc.lines > 888); + const userExplicitlySetGetPath = 'get_path' in args; + const shouldUsePath = getPath || (hasLargeDocument && !userExplicitlySetGetPath); + + // 如果 get_path 为 true 或自动开启,将文档写入临时目录 + if (shouldUsePath) { + const baseTempDir = tmpdir(); + const tempDirName = `semi-docs-${componentName.toLowerCase()}-${version}-${Date.now()}`; + const tempDir = join(baseTempDir, tempDirName); + + // 创建临时目录 + await mkdir(tempDir, { recursive: true }); + + // 写入所有文档文件 + for (const doc of result.documents) { + const filePath = join(tempDir, doc.name); + await writeFile(filePath, doc.content, 'utf-8'); + } + + // 构建纯文本提示信息 + const fileList = documentsWithLines.map(doc => + ` - ${join(tempDir, doc.name)} (${doc.lines.toLocaleString()} 行)` + ).join('\n'); + + const message = `组件 ${componentName} (版本 ${version}) 文档较大,已保存到临时目录。 + +文档文件列表: +${fileList} + +请使用文件读取工具查看文档内容。`; + + return { + content: [ + { + type: 'text', + text: message, + }, + ], + }; + } + + // 默认直接返回文档内容(纯文本) + const docContents = result.documents.map(doc => { + return `===== ${doc.name} =====\n\n${doc.content}`; + }).join('\n\n'); + + return { + content: [ + { + type: 'text', + text: docContents, + }, + ], + }; + } + } catch (error) { + const errorMessage = + 
error instanceof Error ? error.message : String(error); + return { + content: [ + { + type: 'text', + text: `获取文档失败: ${errorMessage}`, + }, + ], + isError: true, + }; + } +} diff --git a/ecosystem/semi-mcp/src/tools/index.ts b/ecosystem/semi-mcp/src/tools/index.ts new file mode 100644 index 0000000000..24b8611e72 --- /dev/null +++ b/ecosystem/semi-mcp/src/tools/index.ts @@ -0,0 +1,18 @@ +import { Tool, CallToolResult } from '@modelcontextprotocol/sdk/types.js'; +import { getSemiDocumentTool, handleGetSemiDocument } from './get-semi-document.js'; + +/** + * 所有工具的定义 + */ +export const tools: Tool[] = [getSemiDocumentTool]; + +/** + * 工具名称到处理器的映射 + */ +export const toolHandlers: Record< + string, + (args: Record) => Promise +> = { + [getSemiDocumentTool.name]: handleGetSemiDocument, +}; + diff --git a/ecosystem/semi-mcp/src/utils/fetch-directory-list.ts b/ecosystem/semi-mcp/src/utils/fetch-directory-list.ts new file mode 100644 index 0000000000..8c4a3061c2 --- /dev/null +++ b/ecosystem/semi-mcp/src/utils/fetch-directory-list.ts @@ -0,0 +1,263 @@ +/** + * 从 unpkg 或 npmmirror 获取目录列表 + * 同时向两个数据源发送请求,使用第一个成功返回的结果 + */ + +export const UNPKG_BASE_URL = 'https://unpkg.com'; +export const NPMMIRROR_BASE_URL = 'https://registry.npmmirror.com'; + +/** + * 递归扁平化嵌套的目录结构(用于处理 npmmirror 返回的嵌套格式) + */ +function flattenDirectoryStructure( + item: { path: string; type?: string; size?: number; files?: Array<{ path: string; type?: string; size?: number; files?: Array<{ path: string; type?: string; size?: number }> }> }, + result: Array<{ path: string; type?: string; size?: number }> = [] +): Array<{ path: string; type?: string; size?: number }> { + // 将当前项添加到结果中 + result.push({ + path: item.path, + type: item.type, + size: item.size, + }); + + // 如果有嵌套的 files 数组,递归处理 + if (item.files && Array.isArray(item.files)) { + for (const file of item.files) { + flattenDirectoryStructure(file, result); + } + } + + return result; +} + +/** + * 递归获取 NPMMIRROR 的目录列表(因为 NPMMIRROR 返回的嵌套结构中子目录的 files 
是空的,需要递归请求) + */ +async function fetchNpmMirrorDirectoryRecursive( + baseUrl: string, + packageName: string, + version: string, + path: string, + maxDepth: number = 10 +): Promise> { + if (maxDepth <= 0) { + return []; + } + + const url = `${baseUrl}/${packageName}/${version}/files/${path}/?meta`; + const response = await fetch(url, { + headers: { + Accept: 'application/json', + }, + }); + + if (!response.ok) { + throw new Error(`获取目录列表失败: ${response.status} ${response.statusText}`); + } + + const contentType = response.headers.get('content-type') || ''; + if (!contentType.includes('application/json')) { + throw new Error(`API 返回了非 JSON 格式: ${contentType}`); + } + + const data = await response.json() as + | { path: string; type?: string; files?: Array<{ path: string; type?: string; files?: Array<{ path: string; type?: string }> }> }; + + const normalizeType = (item: { path: string; type?: string }): { path: string; type: string } => { + const path = item.path; + if (path.endsWith('/')) { + return { path, type: 'directory' }; + } + if (item.type && item.type.includes('/')) { + return { path, type: 'file' }; + } + if (item.type === 'directory') { + return { path, type: 'directory' }; + } + return { path, type: 'file' }; + }; + + const result: Array<{ path: string; type: string }> = []; + + if (data && typeof data === 'object' && 'files' in data && Array.isArray(data.files)) { + // 递归处理每个子项 + const promises: Promise>[] = []; + + for (const item of data.files) { + const normalized = normalizeType(item); + result.push(normalized); + + // 如果是目录且 files 数组为空,需要递归请求 + if (normalized.type === 'directory' && (!item.files || item.files.length === 0)) { + // 移除路径开头的 /,因为 URL 中不需要 + const subPath = normalized.path.startsWith('/') ? 
normalized.path.slice(1) : normalized.path; + promises.push( + fetchNpmMirrorDirectoryRecursive(baseUrl, packageName, version, subPath, maxDepth - 1) + .then(subFiles => { + // 移除当前目录本身,只保留子文件 + return subFiles.filter(f => f.path !== normalized.path); + }) + .catch(() => []) // 如果子目录请求失败,忽略错误 + ); + } else if (item.files && Array.isArray(item.files) && item.files.length > 0) { + // 如果已经有嵌套的 files,递归扁平化 + const flattened: Array<{ path: string; type?: string; size?: number }> = []; + flattenDirectoryStructure(item, flattened); + const subFiles = flattened + .filter(f => f.path !== normalized.path) // 排除当前目录本身 + .map(normalizeType); + result.push(...subFiles); + } + } + + // 等待所有递归请求完成 + if (promises.length > 0) { + const subResults = await Promise.all(promises); + for (const subFiles of subResults) { + result.push(...subFiles); + } + } + } + + return result; +} + +/** + * 从单个源获取目录列表 + * 导出用于测试 + */ +export async function fetchDirectoryListFromSource( + baseUrl: string, + packageName: string, + version: string, + path: string, + isNpmMirror: boolean = false +): Promise> { + // NPMMIRROR 需要递归请求,因为返回的嵌套结构中子目录的 files 是空的 + if (isNpmMirror) { + return fetchNpmMirrorDirectoryRecursive(baseUrl, packageName, version, path); + } + + // unpkg 使用格式:/package@version/path/?meta + const url = `${baseUrl}/${packageName}@${version}/${path}/?meta`; + + const response = await fetch(url, { + headers: { + Accept: 'application/json', + }, + }); + + if (!response.ok) { + throw new Error(`获取目录列表失败: ${response.status} ${response.statusText}`); + } + + const contentType = response.headers.get('content-type') || ''; + if (!contentType.includes('application/json')) { + throw new Error(`API 返回了非 JSON 格式: ${contentType}`); + } + + const data = (await response.json()) as + | Array<{ path: string; type?: string; size?: number }> + | { files?: Array<{ path: string; type?: string; size?: number }> } + | { path: string; type?: string; size?: number; files?: Array<{ path: string; type?: string; size?: 
number; files?: Array<{ path: string; type?: string; size?: number }> }> }; + + // 将 MIME 类型转换为 file/directory 类型 + const normalizeType = (item: { path: string; type?: string; size?: number }): { path: string; type: string } => { + const path = item.path; + // 如果路径以 / 结尾,认为是目录 + if (path.endsWith('/')) { + return { path, type: 'directory' }; + } + // 如果 type 是 MIME 类型(包含 /),认为是文件 + if (item.type && item.type.includes('/')) { + return { path, type: 'file' }; + } + // 如果 type 是 'directory',认为是目录 + if (item.type === 'directory') { + return { path, type: 'directory' }; + } + // 默认认为是文件 + return { path, type: 'file' }; + }; + + // 处理不同的响应格式 + if (Array.isArray(data)) { + // unpkg 返回的是扁平数组 + return data.map(normalizeType); + } + + // unpkg 可能返回 { package, version, prefix, files: [...] } 格式 + if (data && typeof data === 'object' && 'files' in data) { + const filesData = data as { files?: Array<{ path: string; type?: string; size?: number }> }; + if (Array.isArray(filesData.files)) { + // files 是数组,直接映射 + return filesData.files.map(normalizeType); + } + } + + // 如果返回单个文件对象,检查是否有嵌套结构 + if (data && typeof data === 'object' && 'path' in data) { + const singleItem = data as { path: string; type?: string; size?: number; files?: Array<{ path: string; type?: string; size?: number; files?: Array<{ path: string; type?: string; size?: number }> }> }; + // 如果有嵌套的 files,需要扁平化 + if (singleItem.files && Array.isArray(singleItem.files)) { + const flattened: Array<{ path: string; type?: string; size?: number }> = []; + flattenDirectoryStructure(singleItem, flattened); + return flattened.map(normalizeType); + } + // 否则直接返回单个项 + return [normalizeType(singleItem)]; + } + + throw new Error('无法解析目录列表数据格式'); +} + +/** + * 从 unpkg 或 npmmirror 获取目录列表 + * 同时向两个数据源发送请求,优先使用返回更多文件的结果 + */ +export async function fetchDirectoryList( + packageName: string, + version: string, + path: string +): Promise> { + // 同时向两个源发送请求 + const unpkgPromise = fetchDirectoryListFromSource(UNPKG_BASE_URL, packageName, 
version, path, false); + const npmmirrorPromise = fetchDirectoryListFromSource(NPMMIRROR_BASE_URL, packageName, version, path, true); + + // 等待所有请求完成(无论成功或失败) + const results = await Promise.allSettled([unpkgPromise, npmmirrorPromise]); + + // 收集成功的结果和错误 + const successfulResults: Array<{ source: string; files: Array<{ path: string; type: string }> }> = []; + const errors: Error[] = []; + + if (results[0].status === 'fulfilled') { + successfulResults.push({ source: 'unpkg', files: results[0].value }); + } else { + errors.push(results[0].reason instanceof Error ? results[0].reason : new Error(String(results[0].reason))); + } + + if (results[1].status === 'fulfilled') { + successfulResults.push({ source: 'npmmirror', files: results[1].value }); + } else { + errors.push(results[1].reason instanceof Error ? results[1].reason : new Error(String(results[1].reason))); + } + + // 如果没有成功的结果,抛出错误 + if (successfulResults.length === 0) { + throw new Error(`所有数据源都失败了: ${errors.map((e) => e.message).join('; ')}`); + } + + // 优先使用返回更多文件的结果 + // 如果文件数量相同,优先使用 unpkg(通常更可靠) + successfulResults.sort((a, b) => { + if (b.files.length !== a.files.length) { + return b.files.length - a.files.length; // 文件数量多的优先 + } + // 文件数量相同时,unpkg 优先 + return a.source === 'unpkg' ? 
-1 : 1; + }); + + return successfulResults[0].files; +} + diff --git a/ecosystem/semi-mcp/src/utils/fetch-file-content.ts b/ecosystem/semi-mcp/src/utils/fetch-file-content.ts new file mode 100644 index 0000000000..770b632d22 --- /dev/null +++ b/ecosystem/semi-mcp/src/utils/fetch-file-content.ts @@ -0,0 +1,86 @@ +/** + * 从 unpkg 或 npmmirror 获取具体文件内容 + * 同时向两个数据源发送请求,使用第一个成功返回的结果 + */ + +export const UNPKG_BASE_URL = 'https://unpkg.com'; +export const NPMMIRROR_BASE_URL = 'https://registry.npmmirror.com'; + +/** + * 从单个源获取文件内容 + * 导出用于测试 + */ +export async function fetchFileContentFromSource( + baseUrl: string, + packageName: string, + version: string, + filePath: string, + isNpmMirror: boolean = false +): Promise { + // npmmirror 使用不同的 URL 格式:/package/version/files/path + // unpkg 使用格式:/package@version/path + const url = isNpmMirror + ? `${baseUrl}/${packageName}/${version}/files/${filePath}` + : `${baseUrl}/${packageName}@${version}/${filePath}`; + + const response = await fetch(url, { + headers: { + Accept: 'text/plain, application/json, */*', + }, + }); + + if (!response.ok) { + throw new Error(`获取文件失败: ${response.status} ${response.statusText}`); + } + + const content = await response.text(); + + // 检查是否是 HTML 错误页面 + if (content.trim().startsWith('') || content.includes('npmmirror 镜像站')) { + throw new Error('返回了 HTML 错误页面'); + } + + return content; +} + +/** + * 从 unpkg 或 npmmirror 获取具体文件内容 + * 同时向两个数据源发送请求,使用第一个成功返回的结果 + */ +export async function fetchFileContent( + packageName: string, + version: string, + filePath: string +): Promise { + // 同时向两个源发送请求 + const unpkgPromise = fetchFileContentFromSource(UNPKG_BASE_URL, packageName, version, filePath, false); + const npmmirrorPromise = fetchFileContentFromSource(NPMMIRROR_BASE_URL, packageName, version, filePath, true); + + // 使用 Promise.race 获取第一个成功的结果 + // 将错误转换为永远不会 resolve 的 promise,这样另一个请求有机会成功 + const unpkgWithFallback = unpkgPromise.catch(() => new Promise(() => {})); + const npmmirrorWithFallback = 
npmmirrorPromise.catch(() => new Promise(() => {})); + + // 同时等待两个请求,使用 race 获取第一个成功的结果 + const raceResult = await Promise.race([unpkgWithFallback, npmmirrorWithFallback]).catch( + () => null + ); + + if (raceResult) { + return raceResult; + } + + // 如果 race 没有结果(两个都失败),等待所有请求完成以获取错误信息 + const results = await Promise.allSettled([unpkgPromise, npmmirrorPromise]); + + // 收集所有错误 + const errors: Error[] = []; + for (const result of results) { + if (result.status === 'rejected') { + errors.push(result.reason instanceof Error ? result.reason : new Error(String(result.reason))); + } + } + + throw new Error(`所有数据源都失败了: ${errors.map((e) => e.message).join('; ')}`); +} + diff --git a/ecosystem/semi-mcp/src/utils/get-component-documents.ts b/ecosystem/semi-mcp/src/utils/get-component-documents.ts new file mode 100644 index 0000000000..64d9060c09 --- /dev/null +++ b/ecosystem/semi-mcp/src/utils/get-component-documents.ts @@ -0,0 +1,103 @@ +import { fetchDirectoryList } from './fetch-directory-list.js'; +import { fetchFileContent } from './fetch-file-content.js'; + +export interface ComponentDocument { + name: string; + path: string; + content: string; +} + +export interface ComponentDocumentsResult { + category: string; + documents: ComponentDocument[]; +} + +/** + * 获取组件文档内容(从 content 文件夹) + * content 文件夹结构:content/{category}/{componentName}/index.md, index-en-US.md + * unpkg 返回的是扁平的文件列表,需要从文件路径中提取信息 + * + * 这个函数可以在浏览器和 Node.js 环境中运行 + */ +export async function getComponentDocuments( + componentName: string, + version: string = 'latest' +): Promise { + const packageName = '@douyinfe/semi-ui'; + const componentNameLower = componentName.toLowerCase(); + + // 获取 content 下的所有文件(unpkg 返回扁平列表) + const contentFiles = await fetchDirectoryList(packageName, version, 'content'); + + if (!contentFiles || contentFiles.length === 0) { + return null; + } + + // 从文件路径中查找匹配的组件文档(只要中文文档 index.md) + // 路径格式:/content/{category}/{componentName}/index.md + const componentFiles = 
contentFiles.filter((file) => { + if (file.type !== 'file') { + return false; + } + const path = file.path.toLowerCase(); + // 只匹配中文文档 index.md,排除 index-en-US.md + const pathPattern = new RegExp(`/content/[^/]+/${componentNameLower}/index\\.md$`); + return pathPattern.test(path); + }); + + if (componentFiles.length === 0) { + return null; + } + + // 从第一个文件路径中提取分类 + const firstPath = componentFiles[0].path; + const pathParts = firstPath.split('/'); + // 路径格式:/content/{category}/{componentName}/文件名 + // 或者:content/{category}/{componentName}/文件名 + let categoryIndex = -1; + for (let i = 0; i < pathParts.length; i++) { + if (pathParts[i].toLowerCase() === 'content') { + categoryIndex = i + 1; + break; + } + } + + if (categoryIndex === -1 || categoryIndex >= pathParts.length) { + return null; + } + + const category = pathParts[categoryIndex]; + + // 获取所有文档文件的内容 + // 移除路径开头的 /,因为 fetchFileContent 需要相对路径 + const documentPromises = componentFiles.map(async (file) => { + const filePath = file.path.startsWith('/') ? file.path.slice(1) : file.path; + const parts = file.path.split('/'); + const fileName = parts[parts.length - 1]; + + try { + const content = await fetchFileContent(packageName, version, filePath); + return { + name: fileName, + path: file.path, + content: content, + }; + } catch (error) { + // 如果获取文件内容失败,返回错误信息 + const errorMessage = error instanceof Error ? 
error.message : String(error); + return { + name: fileName, + path: file.path, + content: `获取文档内容失败: ${errorMessage}`, + }; + } + }); + + const documents = await Promise.all(documentPromises); + + return { + category, + documents: documents.sort((a, b) => a.name.localeCompare(b.name)), + }; +} + diff --git a/ecosystem/semi-mcp/src/utils/get-component-list.ts b/ecosystem/semi-mcp/src/utils/get-component-list.ts new file mode 100644 index 0000000000..b5a7d6535c --- /dev/null +++ b/ecosystem/semi-mcp/src/utils/get-component-list.ts @@ -0,0 +1,42 @@ +import { fetchDirectoryList } from './fetch-directory-list.js'; + +/** + * 获取组件列表(从 lib 文件夹) + * 从 @douyinfe/semi-ui 包的 lib 目录中提取组件名称 + */ +export async function getComponentList(version: string): Promise { + const packageName = '@douyinfe/semi-ui'; + const files = await fetchDirectoryList(packageName, version, 'lib'); + + if (!files || files.length === 0) { + return []; + } + + // 从文件路径中提取组件目录名称 + // 路径格式: /lib/cjs/Button/index.js 或 /lib/es/Button/index.js + const componentSet = new Set(); + + for (const file of files) { + const path = file.path; + // 移除开头的 /lib/ 前缀 + const pathWithoutLib = path.replace(/^\/lib\//, '').replace(/^lib\//, ''); + const parts = pathWithoutLib.split('/'); + + // 跳过 cjs、es 等构建目录 + if (parts.length >= 2 && (parts[0] === 'cjs' || parts[0] === 'es')) { + const componentName = parts[1]; + if (componentName && componentName !== 'lib') { + componentSet.add(componentName.toLowerCase()); + } + } else if (parts.length >= 1) { + // 如果没有 cjs/es 前缀,直接取第一部分 + const componentName = parts[0]; + if (componentName && componentName !== 'lib' && componentName !== 'cjs' && componentName !== 'es') { + componentSet.add(componentName.toLowerCase()); + } + } + } + + return Array.from(componentSet).sort(); // 去重并排序 +} + diff --git a/ecosystem/semi-mcp/test-table-docs.mjs b/ecosystem/semi-mcp/test-table-docs.mjs new file mode 100644 index 0000000000..b0a34059d1 --- /dev/null +++ b/ecosystem/semi-mcp/test-table-docs.mjs 
@@ -0,0 +1,299 @@ +// 直接实现函数,因为代码被打包了 +const UNPKG_BASE_URL = 'https://unpkg.com'; +const NPMMIRROR_BASE_URL = 'https://registry.npmmirror.com'; + +function flattenDirectoryStructure(item, result = []) { + result.push({ + path: item.path, + type: item.type, + size: item.size, + }); + if (item.files && Array.isArray(item.files)) { + for (const file of item.files) { + flattenDirectoryStructure(file, result); + } + } + return result; +} + +async function fetchDirectoryListFromSource(baseUrl, packageName, version, path, isNpmMirror = false) { + const url = isNpmMirror + ? `${baseUrl}/${packageName}/${version}/files/${path}/?meta` + : `${baseUrl}/${packageName}@${version}/${path}/?meta`; + + const response = await fetch(url, { + headers: { Accept: 'application/json' }, + }); + + if (!response.ok) { + throw new Error(`获取目录列表失败: ${response.status} ${response.statusText}`); + } + + const contentType = response.headers.get('content-type') || ''; + if (!contentType.includes('application/json')) { + throw new Error(`API 返回了非 JSON 格式: ${contentType}`); + } + + const data = await response.json(); + + const normalizeType = (item) => { + const path = item.path; + if (path.endsWith('/')) return { path, type: 'directory' }; + if (item.type && item.type.includes('/')) return { path, type: 'file' }; + if (item.type === 'directory') return { path, type: 'directory' }; + return { path, type: 'file' }; + }; + + if (Array.isArray(data)) { + return data.map(normalizeType); + } + + if (data && typeof data === 'object' && 'files' in data) { + if (Array.isArray(data.files)) { + const flattened = []; + for (const item of data.files) { + flattenDirectoryStructure(item, flattened); + } + return flattened.map(normalizeType); + } + return []; + } + + if (data && typeof data === 'object' && 'path' in data) { + const singleItem = data; + if (singleItem.files && Array.isArray(singleItem.files)) { + const flattened = []; + flattenDirectoryStructure(singleItem, flattened); + return 
flattened.map(normalizeType); + } + return [normalizeType(singleItem)]; + } + + throw new Error('无法解析目录列表数据格式'); +} + +async function fetchFileContentFromSource(baseUrl, packageName, version, filePath, isNpmMirror = false) { + const url = isNpmMirror + ? `${baseUrl}/${packageName}/${version}/files/${filePath}` + : `${baseUrl}/${packageName}@${version}/${filePath}`; + + const response = await fetch(url, { + headers: { Accept: 'text/plain, application/json, */*' }, + }); + + if (!response.ok) { + throw new Error(`获取文件失败: ${response.status} ${response.statusText}`); + } + + const content = await response.text(); + + if (content.trim().startsWith('') || content.includes('npmmirror 镜像站')) { + throw new Error('返回了 HTML 错误页面'); + } + + return content; +} + +const packageName = '@douyinfe/semi-ui'; +const version = '2.89.2-alpha.3'; +const componentName = 'table'; + +async function testUnpkg() { + console.log('\n' + '='.repeat(80)); + console.log('测试 UNPKG 数据源'); + console.log('='.repeat(80)); + + try { + console.log('\n1. 获取目录列表...'); + const files = await fetchDirectoryListFromSource( + UNPKG_BASE_URL, + packageName, + version, + 'content', + false + ); + + console.log(` 找到 ${files.length} 个文件/目录`); + + const tableFiles = files.filter((file) => { + const path = file.path.toLowerCase(); + return path.includes(componentName) && file.type === 'file'; + }); + + console.log(` 找到 ${tableFiles.length} 个 table 相关文件:`); + tableFiles.forEach((file, index) => { + console.log(` ${index + 1}. ${file.path} (${file.type})`); + }); + + if (tableFiles.length > 0) { + console.log('\n2. 获取文档内容...'); + for (const file of tableFiles.slice(0, 2)) { // 只获取前两个文件 + const filePath = file.path.startsWith('/') ? 
file.path.slice(1) : file.path; + console.log(`\n 文件: ${file.path}`); + console.log(' ' + '-'.repeat(76)); + + try { + const content = await fetchFileContentFromSource( + UNPKG_BASE_URL, + packageName, + version, + filePath, + false + ); + + console.log(` 内容长度: ${content.length} 字符`); + console.log(` 内容预览 (前500字符):`); + console.log(' ' + content.substring(0, 500).replace(/\n/g, '\n ')); + console.log(' ...'); + } catch (error) { + console.error(` 获取失败: ${error.message}`); + } + } + } + } catch (error) { + console.error(`\n错误: ${error.message}`); + console.error(error.stack); + } +} + +async function testNpmMirror() { + console.log('\n' + '='.repeat(80)); + console.log('测试 NPMMIRROR 数据源'); + console.log('='.repeat(80)); + + try { + console.log('\n1. 获取目录列表...'); + const files = await fetchDirectoryListFromSource( + NPMMIRROR_BASE_URL, + packageName, + version, + 'content', + true + ); + + console.log(` 找到 ${files.length} 个文件/目录`); + + const tableFiles = files.filter((file) => { + const path = file.path.toLowerCase(); + return path.includes(componentName) && file.type === 'file'; + }); + + console.log(` 找到 ${tableFiles.length} 个 table 相关文件:`); + tableFiles.forEach((file, index) => { + console.log(` ${index + 1}. ${file.path} (${file.type})`); + }); + + if (tableFiles.length > 0) { + console.log('\n2. 获取文档内容...'); + for (const file of tableFiles.slice(0, 2)) { // 只获取前两个文件 + const filePath = file.path.startsWith('/') ? 
file.path.slice(1) : file.path; + console.log(`\n 文件: ${file.path}`); + console.log(' ' + '-'.repeat(76)); + + try { + const content = await fetchFileContentFromSource( + NPMMIRROR_BASE_URL, + packageName, + version, + filePath, + true + ); + + console.log(` 内容长度: ${content.length} 字符`); + console.log(` 内容预览 (前500字符):`); + console.log(' ' + content.substring(0, 500).replace(/\n/g, '\n ')); + console.log(' ...'); + } catch (error) { + console.error(` 获取失败: ${error.message}`); + } + } + } else { + console.log('\n 注意: 未找到 table 文件,可能是嵌套结构未完全扁平化'); + console.log(` 前10个文件/目录示例:`); + files.slice(0, 10).forEach((file, index) => { + console.log(` ${index + 1}. ${file.path} (${file.type})`); + }); + } + } catch (error) { + console.error(`\n错误: ${error.message}`); + console.error(error.stack); + } +} + +async function compareSources() { + console.log('\n' + '='.repeat(80)); + console.log('对比两个数据源'); + console.log('='.repeat(80)); + + const filePath = 'content/show/table/index.md'; + + try { + console.log('\n同时从两个数据源获取文件...'); + const [unpkgResult, npmmirrorResult] = await Promise.allSettled([ + fetchFileContentFromSource(UNPKG_BASE_URL, packageName, version, filePath, false), + fetchFileContentFromSource(NPMMIRROR_BASE_URL, packageName, version, filePath, true), + ]); + + console.log('\nUNPKG 结果:'); + if (unpkgResult.status === 'fulfilled') { + const content = unpkgResult.value; + console.log(` ✓ 成功获取,内容长度: ${content.length} 字符`); + console.log(` 预览: ${content.substring(0, 100).replace(/\n/g, ' ')}...`); + } else { + console.log(` ✗ 失败: ${unpkgResult.reason?.message || '未知错误'}`); + } + + console.log('\nNPMMIRROR 结果:'); + if (npmmirrorResult.status === 'fulfilled') { + const content = npmmirrorResult.value; + console.log(` ✓ 成功获取,内容长度: ${content.length} 字符`); + console.log(` 预览: ${content.substring(0, 100).replace(/\n/g, ' ')}...`); + } else { + console.log(` ✗ 失败: ${npmmirrorResult.reason?.message || '未知错误'}`); + } + + if (unpkgResult.status === 'fulfilled' && 
npmmirrorResult.status === 'fulfilled') { + const unpkgContent = unpkgResult.value; + const npmmirrorContent = npmmirrorResult.value; + + console.log('\n对比结果:'); + console.log(` 内容长度差异: ${Math.abs(unpkgContent.length - npmmirrorContent.length)} 字符`); + console.log(` 内容是否相同: ${unpkgContent === npmmirrorContent ? '是' : '否'}`); + + if (unpkgContent !== npmmirrorContent) { + // 找出第一个不同的位置 + let diffIndex = 0; + const minLength = Math.min(unpkgContent.length, npmmirrorContent.length); + for (let i = 0; i < minLength; i++) { + if (unpkgContent[i] !== npmmirrorContent[i]) { + diffIndex = i; + break; + } + } + console.log(` 第一个差异位置: 第 ${diffIndex} 个字符`); + console.log(` UNPKG 片段: ${unpkgContent.substring(diffIndex, diffIndex + 50)}`); + console.log(` NPMMIRROR 片段: ${npmmirrorContent.substring(diffIndex, diffIndex + 50)}`); + } + } + } catch (error) { + console.error(`\n对比失败: ${error.message}`); + } +} + +async function main() { + console.log('Table 组件文档测试脚本'); + console.log(`包名: ${packageName}`); + console.log(`版本: ${version}`); + console.log(`组件: ${componentName}`); + + await testUnpkg(); + await testNpmMirror(); + await compareSources(); + + console.log('\n' + '='.repeat(80)); + console.log('测试完成'); + console.log('='.repeat(80)); +} + +main().catch(console.error); + diff --git a/ecosystem/semi-mcp/tests/get-semi-document.test.ts b/ecosystem/semi-mcp/tests/get-semi-document.test.ts new file mode 100644 index 0000000000..901accc6ff --- /dev/null +++ b/ecosystem/semi-mcp/tests/get-semi-document.test.ts @@ -0,0 +1,730 @@ +import { expect, test } from '@rstest/core'; +import { handleGetSemiDocument } from '../src/tools/get-semi-document.js'; +import { fetchFileContent, fetchFileContentFromSource, UNPKG_BASE_URL as FILE_UNPKG_BASE_URL, NPMMIRROR_BASE_URL as FILE_NPMMIRROR_BASE_URL } from '../src/utils/fetch-file-content.js'; +import { fetchDirectoryList, fetchDirectoryListFromSource, UNPKG_BASE_URL, NPMMIRROR_BASE_URL } from '../src/utils/fetch-directory-list.js'; +import { 
readFileSync, existsSync } from 'fs'; +import { join } from 'path'; + +test('get_semi_document: 获取组件列表(不提供组件名称)', async () => { + const result = await handleGetSemiDocument({}); + + expect(result).toBeDefined(); + expect(result.content).toBeDefined(); + expect(result.content.length).toBeGreaterThan(0); + + const firstContent = result.content[0]; + if (firstContent.type !== 'text') { + throw new Error('Expected text content'); + } + const text = firstContent.text; + + // 检查是否有错误 + if (result.isError) { + console.warn('API 调用返回错误:', text); + expect(text).toContain('latest'); + return; + } + + // 验证返回的是纯文本格式 + expect(text).toContain('Semi Design 组件列表'); + expect(text).toContain('版本'); + expect(text).toContain('个组件'); +}); + +test('get_semi_document: 获取组件列表(指定版本)', async () => { + const result = await handleGetSemiDocument({ + version: '2.89.2-alpha.3', + }); + + expect(result).toBeDefined(); + expect(result.content).toBeDefined(); + + const firstContent = result.content[0]; + if (firstContent.type !== 'text') { + throw new Error('Expected text content'); + } + const text = firstContent.text; + + // 检查是否有错误 + if (result.isError) { + console.warn('API 调用返回错误:', text); + expect(text).toContain('2.89.2-alpha.3'); + return; + } + + // 验证返回的是纯文本格式 + expect(text).toContain('Semi Design 组件列表'); + expect(text).toContain('2.89.2-alpha.3'); +}); + +test('get_semi_document: 获取 Button 组件文档', async () => { + const result = await handleGetSemiDocument({ + componentName: 'Button', + version: '2.89.2-alpha.3', + }); + + expect(result).toBeDefined(); + expect(result.content).toBeDefined(); + + const firstContent = result.content[0]; + if (firstContent.type !== 'text') { + throw new Error('Expected text content'); + } + const text = firstContent.text; + + // 检查是否有错误 + if (result.isError) { + console.warn('API 调用返回错误:', text); + return; + } + + // Button 文档比较小,应该直接返回文档内容 + // 验证包含文档分隔符和 markdown 内容 + expect(text).toContain('====='); + expect(text.toLowerCase()).toContain('button'); +}); 
+ +test('get_semi_document: 获取 Input 组件文档(指定版本)', async () => { + const result = await handleGetSemiDocument({ + componentName: 'Input', + version: '2.89.2-alpha.3', + }); + + expect(result).toBeDefined(); + expect(result.content).toBeDefined(); + + const firstContent = result.content[0]; + if (firstContent.type !== 'text') { + throw new Error('Expected text content'); + } + const text = firstContent.text; + + // 检查是否有错误 + if (result.isError) { + console.warn('API 调用返回错误:', text); + return; + } + + // 验证包含文档内容 + expect(text.toLowerCase()).toContain('input'); +}); + +test('get_semi_document: 组件名称应该转为小写', async () => { + const result = await handleGetSemiDocument({ + componentName: 'BUTTON', + version: '2.89.2-alpha.3', + }); + + expect(result).toBeDefined(); + const firstContent = result.content[0]; + if (firstContent.type !== 'text') { + throw new Error('Expected text content'); + } + const text = firstContent.text; + + // 验证能正确处理大写组件名 + expect(text.toLowerCase()).toContain('button'); +}); + +test('get_semi_document: 不传入 version 时应该使用 latest(获取组件列表)', async () => { + const result = await handleGetSemiDocument({ + // 不传入 version + }); + + expect(result).toBeDefined(); + expect(result.content).toBeDefined(); + expect(result.content.length).toBeGreaterThan(0); + + const firstContent = result.content[0]; + if (firstContent.type !== 'text') { + throw new Error('Expected text content'); + } + const text = firstContent.text; + + // 检查是否有错误 + if (result.isError) { + console.warn('API 调用返回错误:', text); + expect(text).toContain('latest'); + return; + } + + // 验证返回的是纯文本格式,包含 latest 版本信息 + expect(text).toContain('Semi Design 组件列表'); + expect(text).toContain('latest'); +}); + +test('get_semi_document: 不传入 version 时应该使用 latest(获取组件文档)', async () => { + const result = await handleGetSemiDocument({ + componentName: 'Button', + // 不传入 version,但 latest 版本可能没有文档,所以这个测试可能会失败 + // 我们主要测试默认值逻辑 + }); + + expect(result).toBeDefined(); + expect(result.content).toBeDefined(); + + const 
firstContent = result.content[0]; + if (firstContent.type !== 'text') { + throw new Error('Expected text content'); + } + const text = firstContent.text; + + // 检查是否有错误(latest 版本可能没有文档) + if (result.isError) { + // latest 版本可能没有文档,这是正常的 + console.warn('API 调用返回错误:', text); + return; + } + + // 验证返回了有效内容 + expect(text.length).toBeGreaterThan(0); +}); + +test('get_semi_document: 获取 Table 组件文档并验证文档内容', async () => { + const componentName = 'Table'; + const version = '2.89.2-alpha.3'; + + // 1. 获取文档列表 + const result = await handleGetSemiDocument({ + componentName, + version, + }); + + expect(result).toBeDefined(); + expect(result.content).toBeDefined(); + expect(result.isError).not.toBe(true); + + const firstContent = result.content[0]; + if (firstContent.type !== 'text') { + throw new Error('Expected text content'); + } + const text = firstContent.text; + + // 检查是否有错误 + if (result.isError) { + console.warn('API 调用返回错误:', text); + return; + } + + // Table 文档很大,应该自动保存到临时目录 + expect(text).toContain('文档较大'); + expect(text).toContain('已保存到临时目录'); + expect(text).toContain('文档文件列表'); + + // 提取临时目录路径并验证文件存在 + const pathMatch = text.match(/\/tmp\/semi-docs-table[^\s]+/); + if (pathMatch) { + const filePath = pathMatch[0]; + expect(existsSync(filePath)).toBe(true); + + // 验证文件内容 + const fileContent = readFileSync(filePath, 'utf-8'); + expect(fileContent.length).toBeGreaterThan(0); + + // 验证文档内容是 markdown 格式 + expect( + fileContent.includes('---') || + fileContent.includes('#') || + fileContent.includes('```') || + fileContent.includes('title:') + ).toBe(true); + } +}); + +test('get_semi_document: 验证返回结果包含完整的文档内容', async () => { + const componentName = 'Table'; + const version = '2.89.2-alpha.3'; + + const result = await handleGetSemiDocument({ + componentName, + version, + }); + + expect(result).toBeDefined(); + expect(result.content).toBeDefined(); + expect(result.isError).not.toBe(true); + + const firstContent = result.content[0]; + if (firstContent.type !== 'text') { + throw 
new Error('Expected text content'); + } + const text = firstContent.text; + + // 检查是否有错误 + if (result.isError) { + console.warn('API 调用返回错误:', text); + return; + } + + // Table 文档很大,会自动保存到临时目录 + expect(text).toContain('文档较大'); + expect(text).toContain('请使用文件读取工具查看文档内容'); + + // 提取所有文件路径并验证 + const pathMatches = text.match(/\/tmp\/semi-docs-table[^\s]+/g); + if (pathMatches) { + for (const filePath of pathMatches) { + expect(existsSync(filePath)).toBe(true); + const fileContent = readFileSync(filePath, 'utf-8'); + expect(fileContent.length).toBeGreaterThan(0); + expect( + fileContent.includes('---') || + fileContent.includes('#') || + fileContent.includes('```') || + fileContent.includes('title:') + ).toBe(true); + } + } +}); + +test('get_semi_document: 验证 Button 组件文档内容', async () => { + const componentName = 'Button'; + const version = '2.89.2-alpha.3'; + + const result = await handleGetSemiDocument({ + componentName, + version, + }); + + expect(result).toBeDefined(); + expect(result.content).toBeDefined(); + expect(result.isError).not.toBe(true); + + const firstContent = result.content[0]; + if (firstContent.type !== 'text') { + throw new Error('Expected text content'); + } + const text = firstContent.text; + + // 检查是否有错误 + if (result.isError) { + console.warn('API 调用返回错误:', text); + return; + } + + // Button 文档应该直接返回内容(小于 888 行) + expect(text).toContain('====='); + + // 验证文档内容包含 Button 相关的关键词 + const textLower = text.toLowerCase(); + expect( + textLower.includes('button') || + textLower.includes('按钮') || + textLower.includes('click') || + textLower.includes('type') + ).toBe(true); +}); + +test('fetchDirectoryList: 测试 unpkg 数据源', async () => { + const packageName = '@douyinfe/semi-ui'; + const version = '2.89.2-alpha.3'; + const path = 'content'; + + const result = await fetchDirectoryListFromSource( + UNPKG_BASE_URL, + packageName, + version, + path, + false // isNpmMirror = false + ); + + expect(result).toBeDefined(); + expect(Array.isArray(result)).toBe(true); 
+ expect(result.length).toBeGreaterThan(0); + + // 验证返回的数据结构 + result.forEach((item) => { + expect(item).toBeDefined(); + expect(item.path).toBeDefined(); + expect(typeof item.path).toBe('string'); + expect(item.type).toBeDefined(); + expect(['file', 'directory']).toContain(item.type); + }); + + // 验证能找到 table 相关的文件 + const tableFiles = result.filter((item) => + item.path.toLowerCase().includes('table') && item.type === 'file' + ); + expect(tableFiles.length).toBeGreaterThan(0); +}); + +test('fetchDirectoryList: 测试 npmmirror 数据源', async () => { + const packageName = '@douyinfe/semi-ui'; + const version = '2.89.2-alpha.3'; + const path = 'content'; + + try { + const result = await fetchDirectoryListFromSource( + NPMMIRROR_BASE_URL, + packageName, + version, + path, + true // isNpmMirror = true + ); + + expect(result).toBeDefined(); + expect(Array.isArray(result)).toBe(true); + expect(result.length).toBeGreaterThan(0); + + // 验证返回的数据结构 + result.forEach((item) => { + expect(item).toBeDefined(); + expect(item.path).toBeDefined(); + expect(typeof item.path).toBe('string'); + expect(item.type).toBeDefined(); + expect(['file', 'directory']).toContain(item.type); + }); + + // 验证能找到 table 相关的文件(npmmirror 可能返回嵌套结构,所以可能找不到) + const tableFiles = result.filter((item) => + item.path.toLowerCase().includes('table') && item.type === 'file' + ); + // npmmirror 可能返回嵌套结构,如果扁平化成功应该能找到,如果失败则至少验证数据结构正确 + if (tableFiles.length === 0) { + // 至少验证返回了文件或目录 + const hasFiles = result.some((item) => item.type === 'file'); + const hasDirectories = result.some((item) => item.type === 'directory'); + expect(hasFiles || hasDirectories).toBe(true); + } else { + expect(tableFiles.length).toBeGreaterThan(0); + } + } catch (error) { + // npmmirror 可能不稳定,如果失败则跳过测试 + const errorMessage = error instanceof Error ? 
error.message : String(error); + console.warn(`npmmirror 数据源测试失败: ${errorMessage}`); + // 不抛出错误,因为这是外部服务,可能不稳定 + } +}); + +test('fetchFileContent: 测试 unpkg 数据源', async () => { + const packageName = '@douyinfe/semi-ui'; + const version = '2.89.2-alpha.3'; + const filePath = 'content/show/table/index.md'; + + const content = await fetchFileContentFromSource( + FILE_UNPKG_BASE_URL, + packageName, + version, + filePath, + false // isNpmMirror = false + ); + + expect(content).toBeDefined(); + expect(typeof content).toBe('string'); + expect(content.length).toBeGreaterThan(0); + + // 验证文档内容是 markdown 格式 + expect( + content.includes('---') || + content.includes('#') || + content.includes('```') || + content.includes('title:') + ).toBe(true); + + // 验证不是 HTML 错误页面 + expect(content.trim().startsWith('')).toBe(false); + expect(content.includes('npmmirror 镜像站')).toBe(false); + + // 验证包含 Table 相关的内容 + const contentLower = content.toLowerCase(); + expect( + contentLower.includes('table') || + contentLower.includes('表格') + ).toBe(true); +}); + +test('fetchFileContent: 测试 npmmirror 数据源', async () => { + const packageName = '@douyinfe/semi-ui'; + const version = '2.89.2-alpha.3'; + const filePath = 'content/show/table/index.md'; + + const content = await fetchFileContentFromSource( + FILE_NPMMIRROR_BASE_URL, + packageName, + version, + filePath, + true // isNpmMirror = true + ); + + expect(content).toBeDefined(); + expect(typeof content).toBe('string'); + expect(content.length).toBeGreaterThan(0); + + // 验证文档内容是 markdown 格式 + expect( + content.includes('---') || + content.includes('#') || + content.includes('```') || + content.includes('title:') + ).toBe(true); + + // 验证不是 HTML 错误页面 + expect(content.trim().startsWith('')).toBe(false); + expect(content.includes('npmmirror 镜像站')).toBe(false); + + // 验证包含 Table 相关的内容 + const contentLower = content.toLowerCase(); + expect( + contentLower.includes('table') || + contentLower.includes('表格') + ).toBe(true); +}); + 
+test('fetchDirectoryList: 验证两个数据源都能正常工作', async () => { + const packageName = '@douyinfe/semi-ui'; + const version = '2.89.2-alpha.3'; + const path = 'content'; + + // 测试两个数据源都能返回数据 + const [unpkgResult, npmmirrorResult] = await Promise.allSettled([ + fetchDirectoryListFromSource(UNPKG_BASE_URL, packageName, version, path, false), + fetchDirectoryListFromSource(NPMMIRROR_BASE_URL, packageName, version, path, true), + ]); + + // 至少有一个数据源应该成功 + const unpkgSuccess = unpkgResult.status === 'fulfilled'; + const npmmirrorSuccess = npmmirrorResult.status === 'fulfilled'; + + expect(unpkgSuccess || npmmirrorSuccess).toBe(true); + + // 如果 unpkg 成功,验证数据 + if (unpkgSuccess) { + const result = unpkgResult.value; + expect(result).toBeDefined(); + expect(Array.isArray(result)).toBe(true); + expect(result.length).toBeGreaterThan(0); + } + + // 如果 npmmirror 成功,验证数据 + if (npmmirrorSuccess) { + const result = npmmirrorResult.value; + expect(result).toBeDefined(); + expect(Array.isArray(result)).toBe(true); + expect(result.length).toBeGreaterThan(0); + } + + // 如果两个都成功,验证它们都能找到 table 文件 + if (unpkgSuccess && npmmirrorSuccess) { + const unpkgTableFiles = unpkgResult.value.filter( + (item) => item.path.toLowerCase().includes('table') && item.type === 'file' + ); + const npmmirrorTableFiles = npmmirrorResult.value.filter( + (item) => item.path.toLowerCase().includes('table') && item.type === 'file' + ); + + expect(unpkgTableFiles.length).toBeGreaterThan(0); + // npmmirror 可能返回嵌套结构,如果扁平化成功应该能找到 + // 如果找不到,至少验证返回了数据 + if (npmmirrorTableFiles.length === 0) { + expect(npmmirrorResult.value.length).toBeGreaterThan(0); + } else { + expect(npmmirrorTableFiles.length).toBeGreaterThan(0); + } + } +}); + +test('fetchFileContent: 验证两个数据源都能正常工作', async () => { + const packageName = '@douyinfe/semi-ui'; + const version = '2.89.2-alpha.3'; + const filePath = 'content/show/table/index.md'; + + // 测试两个数据源都能返回数据 + const [unpkgResult, npmmirrorResult] = await Promise.allSettled([ + 
fetchFileContentFromSource(FILE_UNPKG_BASE_URL, packageName, version, filePath, false), + fetchFileContentFromSource(FILE_NPMMIRROR_BASE_URL, packageName, version, filePath, true), + ]); + + // 至少有一个数据源应该成功 + const unpkgSuccess = unpkgResult.status === 'fulfilled'; + const npmmirrorSuccess = npmmirrorResult.status === 'fulfilled'; + + expect(unpkgSuccess || npmmirrorSuccess).toBe(true); + + // 如果 unpkg 成功,验证数据 + if (unpkgSuccess) { + const content = unpkgResult.value; + expect(content).toBeDefined(); + expect(typeof content).toBe('string'); + expect(content.length).toBeGreaterThan(0); + expect(content.trim().startsWith('')).toBe(false); + } + + // 如果 npmmirror 成功,验证数据 + if (npmmirrorSuccess) { + const content = npmmirrorResult.value; + expect(content).toBeDefined(); + expect(typeof content).toBe('string'); + expect(content.length).toBeGreaterThan(0); + expect(content.trim().startsWith('')).toBe(false); + } + + // 如果两个都成功,验证内容相似(应该都是同一个文件) + if (unpkgSuccess && npmmirrorSuccess) { + const unpkgContent = unpkgResult.value; + const npmmirrorContent = npmmirrorResult.value; + + // 验证两个内容都包含相同的关键词 + expect(unpkgContent.toLowerCase().includes('table') || unpkgContent.toLowerCase().includes('表格')).toBe(true); + expect(npmmirrorContent.toLowerCase().includes('table') || npmmirrorContent.toLowerCase().includes('表格')).toBe(true); + + // 验证内容长度相似(允许一些差异,但不应该差太多) + const lengthDiff = Math.abs(unpkgContent.length - npmmirrorContent.length); + const avgLength = (unpkgContent.length + npmmirrorContent.length) / 2; + expect(lengthDiff / avgLength).toBeLessThan(0.1); // 差异应该小于 10% + } +}); + +test('get_semi_document: 测试 get_path 参数 - 将文档写入临时目录', async () => { + const componentName = 'Table'; + const version = '2.89.2-alpha.3'; + + const result = await handleGetSemiDocument({ + componentName, + version, + get_path: true, + }); + + expect(result).toBeDefined(); + expect(result.content).toBeDefined(); + expect(result.isError).not.toBe(true); + + const firstContent = result.content[0]; 
+ if (firstContent.type !== 'text') { + throw new Error('Expected text content'); + } + const text = firstContent.text; + + // 检查是否有错误 + if (result.isError) { + console.warn('API 调用返回错误:', text); + return; + } + + // 验证返回结果包含临时目录信息 + expect(text).toContain('文档较大'); + expect(text).toContain('已保存到临时目录'); + expect(text).toContain('文档文件列表'); + expect(text).toContain('请使用文件读取工具查看文档内容'); + + // 提取文件路径并验证 + const pathMatches = text.match(/\/tmp\/semi-docs-table[^\s]+/g); + expect(pathMatches).toBeDefined(); + expect(pathMatches!.length).toBeGreaterThan(0); + + // 验证每个文件都存在且内容有效 + for (const filePath of pathMatches!) { + expect(existsSync(filePath)).toBe(true); + const fileContent = readFileSync(filePath, 'utf-8'); + expect(fileContent.length).toBeGreaterThan(0); + expect( + fileContent.includes('---') || + fileContent.includes('#') || + fileContent.includes('```') || + fileContent.includes('title:') + ).toBe(true); + } +}); + +test('get_semi_document: 测试 get_path=false 时返回文档内容', async () => { + const componentName = 'Divider'; // 使用小文档 + const version = '2.89.2-alpha.3'; + + const result = await handleGetSemiDocument({ + componentName, + version, + get_path: false, // 明确设置为 false + }); + + expect(result).toBeDefined(); + expect(result.content).toBeDefined(); + expect(result.isError).not.toBe(true); + + const firstContent = result.content[0]; + if (firstContent.type !== 'text') { + throw new Error('Expected text content'); + } + const text = firstContent.text; + + // 检查是否有错误 + if (result.isError) { + console.warn('API 调用返回错误:', text); + return; + } + + // 验证返回结果直接包含文档内容 + expect(text).toContain('====='); + expect(text.toLowerCase()).toContain('divider'); + + // 验证不是临时目录路径形式 + expect(text).not.toContain('已保存到临时目录'); +}); + +test('get_semi_document: 测试自动开启 get_path(文档大于 888 行)', async () => { + const componentName = 'Table'; // Table 文档有 6000+ 行,应该自动开启 + const version = '2.89.2-alpha.3'; + + const result = await handleGetSemiDocument({ + componentName, + version, + // 不设置 
get_path,应该自动开启 + }); + + expect(result).toBeDefined(); + expect(result.content).toBeDefined(); + expect(result.isError).not.toBe(true); + + const firstContent = result.content[0]; + if (firstContent.type !== 'text') { + throw new Error('Expected text content'); + } + const text = firstContent.text; + + // 检查是否有错误 + if (result.isError) { + console.warn('API 调用返回错误:', text); + return; + } + + // 验证自动开启了 get_path(返回了临时目录信息) + expect(text).toContain('文档较大'); + expect(text).toContain('已保存到临时目录'); + expect(text).toContain('请使用文件读取工具查看文档内容'); + + // 验证文件路径存在 + const pathMatches = text.match(/\/tmp\/semi-docs-table[^\s]+/g); + expect(pathMatches).toBeDefined(); + expect(pathMatches!.length).toBeGreaterThan(0); +}); + +test('get_semi_document: 测试小文档不自动开启 get_path', async () => { + // 找一个行数小于 888 的组件,比如 divider (111 行) + const componentName = 'Divider'; + const version = '2.89.2-alpha.3'; + + const result = await handleGetSemiDocument({ + componentName, + version, + // 不设置 get_path + }); + + expect(result).toBeDefined(); + expect(result.content).toBeDefined(); + expect(result.isError).not.toBe(true); + + const firstContent = result.content[0]; + if (firstContent.type !== 'text') { + throw new Error('Expected text content'); + } + const text = firstContent.text; + + // 检查是否有错误 + if (result.isError) { + console.warn('API 调用返回错误:', text); + return; + } + + // 验证小文档不会自动开启 get_path(应该直接返回文档内容) + expect(text).not.toContain('已保存到临时目录'); + expect(text).toContain('====='); + expect(text.toLowerCase()).toContain('divider'); +}); diff --git a/ecosystem/semi-mcp/tsconfig.json b/ecosystem/semi-mcp/tsconfig.json new file mode 100644 index 0000000000..64b77e4893 --- /dev/null +++ b/ecosystem/semi-mcp/tsconfig.json @@ -0,0 +1,15 @@ +{ + "compilerOptions": { + "lib": ["ES2022"], + "module": "ESNext", + "noEmit": true, + "strict": true, + "skipLibCheck": true, + "isolatedModules": true, + "resolveJsonModule": true, + "moduleResolution": "bundler", + "useDefineForClassFields": true, + 
"allowImportingTsExtensions": true + }, + "include": ["src"] +} diff --git a/gatsby-node.js b/gatsby-node.js index 704b2a20c3..9125ae6023 100644 --- a/gatsby-node.js +++ b/gatsby-node.js @@ -156,7 +156,7 @@ exports.onCreateWebpackConfig = ({ stage, rules, loaders, plugins, actions }) => }, { test: /\.m?js/, - include: [/micromark-util-sanitize-uri/, /mdast-util-from-markdown/, /micromark/, /mdast-util-to-markdown/, /semi-foundation\/node_modules\/@mdx-js/, /jsonc-parser/], + include: [/micromark-util-sanitize-uri/, /mdast-util-from-markdown/, /micromark/, /mdast-util-to-markdown/, /semi-foundation\/node_modules\/@mdx-js/, /jsonc-parser/, /@mlc-ai\/web-llm/], use: ["esbuild-loader"] }, { diff --git a/packages/semi-foundation/clientAI/clientAI.scss b/packages/semi-foundation/clientAI/clientAI.scss new file mode 100644 index 0000000000..6997d136ea --- /dev/null +++ b/packages/semi-foundation/clientAI/clientAI.scss @@ -0,0 +1,70 @@ +@import './variables.scss'; + +$module: #{$prefix}-client-ai; + +.#{$module} { + &-wrapper { + padding: $spacing-clientAI-wrapper-padding; + max-width: $width-clientAI-wrapper-max; + margin: 0 auto; + min-height: $height-clientAI-wrapper-min; + height: $height-clientAI-wrapper-min; + display: flex; + flex-direction: column; + } + + &-loading { + padding: $spacing-clientAI-loading-paddingY $spacing-clientAI-loading-paddingX !important; + display: flex !important; + flex-direction: column !important; + justify-content: center !important; + align-items: center !important; + min-height: $height-clientAI-loading-min !important; + gap: $spacing-clientAI-loading-gap; + } + + &-loading-content { + width: 100%; + max-width: $width-clientAI-loading-content-max; + display: flex; + flex-direction: column; + gap: $spacing-clientAI-loading-content-gap; + } + + &-loading-text { + text-align: center; + font-size: $font-clientAI-loading-text-fontSize; + color: var(--semi-color-text-0); + } + + &-error { + padding: $spacing-clientAI-error-padding; + } + + 
&-content { + flex: 1; + display: flex; + flex-direction: column; + min-height: 0; + overflow: hidden; + } + + &-dialogue-wrapper { + flex: 1; + overflow: auto; + } + + &-input-wrapper { + margin: $spacing-clientAI-input-wrapper-margin; + min-height: $height-clientAI-input-wrapper-min; + max-height: $height-clientAI-input-wrapper-max; + flex-shrink: 0; + } + + &-input-edit { + margin: $spacing-clientAI-input-edit-marginY $spacing-clientAI-input-edit-marginX; + max-height: $height-clientAI-input-edit-max; + flex-shrink: 0; + } +} + diff --git a/packages/semi-foundation/clientAI/constants.ts b/packages/semi-foundation/clientAI/constants.ts new file mode 100644 index 0000000000..4064c25d1d --- /dev/null +++ b/packages/semi-foundation/clientAI/constants.ts @@ -0,0 +1,109 @@ +import { BASE_CLASS_PREFIX } from '../base/constants'; +import type { AppConfig, MLCEngineConfig } from './interface'; + +const cssClasses = { + PREFIX: `${BASE_CLASS_PREFIX}-client-ai`, + WRAPPER: `${BASE_CLASS_PREFIX}-client-ai-wrapper`, + LOADING: `${BASE_CLASS_PREFIX}-client-ai-loading`, + LOADING_CONTENT: `${BASE_CLASS_PREFIX}-client-ai-loading-content`, + LOADING_TEXT: `${BASE_CLASS_PREFIX}-client-ai-loading-text`, + ERROR: `${BASE_CLASS_PREFIX}-client-ai-error`, + CONTENT: `${BASE_CLASS_PREFIX}-client-ai-content`, + DIALOGUE_WRAPPER: `${BASE_CLASS_PREFIX}-client-ai-dialogue-wrapper`, + INPUT_WRAPPER: `${BASE_CLASS_PREFIX}-client-ai-input-wrapper`, + INPUT_EDIT: `${BASE_CLASS_PREFIX}-client-ai-input-edit`, +} as const; + +const strings = {} as const; + +const numbers = {} as const; + +// ============================================ +// 国外配置(使用 Hugging Face + GitHub Raw) +// ============================================ + +// Qwen3-1.7B 模型配置 - 国外 +export const Qwen3_1_7B_RECORD = { + model: 'https://huggingface.co/mlc-ai/Qwen3-1.7B-q4f32_1-MLC', + model_id: 'Qwen3-1.7B-q4f32_1-MLC', + model_lib: 
'https://raw.githubusercontent.com/mlc-ai/binary-mlc-llm-libs/main/web-llm-models/v0_2_80/Qwen3-1.7B-q4f32_1-ctx4k_cs1k-webgpu.wasm', + vram_required_MB: 2635.44, + low_resource_required: true, + overrides: { + context_window_size: 40960, + }, +}; + +// Qwen3-1.7B 引擎配置 - 国外 +export const Qwen3_1_7B_ENGINE_CONFIG: MLCEngineConfig = { + appConfig: { + useIndexedDBCache: true, + model_list: [Qwen3_1_7B_RECORD], + } as AppConfig, +}; + +// Qwen3-4B 模型配置 - 国外 +export const Qwen3_4B_RECORD = { + model: 'https://huggingface.co/mlc-ai/Qwen3-4B-q4f32_1-MLC', + model_id: 'Qwen3-4B-q4f32_1-MLC', + model_lib: 'https://raw.githubusercontent.com/mlc-ai/binary-mlc-llm-libs/main/web-llm-models/v0_2_80/Qwen3-4B-q4f32_1-ctx4k_cs1k-webgpu.wasm', + vram_required_MB: 6000, + low_resource_required: false, + overrides: { + context_window_size: 40960, + }, +}; + +// Qwen3-4B 引擎配置 - 国外 +export const Qwen3_4B_ENGINE_CONFIG: MLCEngineConfig = { + appConfig: { + useIndexedDBCache: true, + model_list: [Qwen3_4B_RECORD], + } as AppConfig, +}; + +// ============================================ +// 中国配置(使用 ModelScope + jsDelivr CDN) +// ============================================ + +// Qwen3-1.7B 模型配置 - 中国 +export const Qwen3_1_7B_RECORD_CN = { + model: 'https://modelscope.cn/models/mlc-ai/Qwen3-1.7B-q4f32_1-MLC', + model_id: 'Qwen3-1.7B-q4f32_1-MLC', + model_lib: 'https://cdn.jsdelivr.net/gh/mlc-ai/binary-mlc-llm-libs@main/web-llm-models/v0_2_80/Qwen3-1.7B-q4f32_1-ctx4k_cs1k-webgpu.wasm', + vram_required_MB: 2635.44, + low_resource_required: true, + overrides: { + context_window_size: 40960, + }, +}; + +// Qwen3-1.7B 引擎配置 - 中国 +export const Qwen3_1_7B_ENGINE_CONFIG_CN: MLCEngineConfig = { + appConfig: { + useIndexedDBCache: true, + model_list: [Qwen3_1_7B_RECORD_CN], + } as AppConfig, +}; + +// Qwen3-4B 模型配置 - 中国 +export const Qwen3_4B_RECORD_CN = { + model: 'https://modelscope.cn/models/mlc-ai/Qwen3-4B-q4f32_1-MLC', + model_id: 'Qwen3-4B-q4f32_1-MLC', + model_lib: 
'https://cdn.jsdelivr.net/gh/mlc-ai/binary-mlc-llm-libs@main/web-llm-models/v0_2_80/Qwen3-4B-q4f32_1-ctx4k_cs1k-webgpu.wasm', + vram_required_MB: 6000, + low_resource_required: false, + overrides: { + context_window_size: 40960, + }, +}; + +// Qwen3-4B 引擎配置 - 中国 +export const Qwen3_4B_ENGINE_CONFIG_CN: MLCEngineConfig = { + appConfig: { + useIndexedDBCache: true, + model_list: [Qwen3_4B_RECORD_CN], + } as AppConfig, +}; + +export { cssClasses, strings, numbers }; diff --git a/packages/semi-foundation/clientAI/foundation.ts b/packages/semi-foundation/clientAI/foundation.ts new file mode 100644 index 0000000000..3131ae392a --- /dev/null +++ b/packages/semi-foundation/clientAI/foundation.ts @@ -0,0 +1,1380 @@ +import BaseFoundation, { DefaultAdapter } from '../base/foundation'; +import { CreateWebWorkerMLCEngine, CreateMLCEngine } from '@mlc-ai/web-llm'; +import type { + WebWorkerMLCEngine, + MLCEngineConfig, + ChatOptions, + InitProgressCallback, + InitProgressReport, + WorkerConfig, +} from './interface'; +import type { MessageContent } from '../aiChatInput/interface'; +import type { Message } from '../aiChatDialogue/foundation'; +import chatInputToMessage from '../aiChatDialogue/dataAdapter/chatInputToMessage'; +import { getUuidv4 } from '../utils/uuid'; + +/** + * WebLLM 内部使用的消息格式 + */ +export interface WebLLMMessage { + role: 'system' | 'user' | 'assistant'; + content: string +} + +/** + * Engine 缓存条目 + */ +interface EngineCacheEntry { + engine: WebWorkerMLCEngine; + refCount: number +} + +/** + * 初始化引擎的配置参数 + */ +export interface InitEngineConfig { + modelId: string | string[]; + worker?: WorkerConfig; + engineConfig?: MLCEngineConfig; + chatOpts?: ChatOptions | ChatOptions[] +} + +/** + * 全局 Engine 管理器(单例模式) + * 确保同一个 modelId 只下载和加载一次 + */ +class EngineManager { + private static instance: EngineManager; + + // 已加载的 engine 缓存 + private engineCache: Map = new Map(); + + // 正在加载的 Promise 缓存(防止重复下载) + private loadingPromises: Map> = new Map(); + + private 
constructor() {} + + static getInstance(): EngineManager { + if (!EngineManager.instance) { + EngineManager.instance = new EngineManager(); + } + return EngineManager.instance; + } + + /** + * 生成缓存 key + * 使用 modelId 作为主要标识 + */ + private getCacheKey(modelId: string | string[]): string { + if (Array.isArray(modelId)) { + return modelId.sort().join('|'); + } + return modelId; + } + + /** + * 创建引擎(内部方法) + */ + private async createEngine(config: InitEngineConfig): Promise { + const { modelId, worker, engineConfig, chatOpts } = config; + + // 判断是否使用 Worker + const useWorker = worker?.enabled !== false && worker?.url; + + let engine: WebWorkerMLCEngine; + + if (useWorker && worker?.url) { + // 使用 Worker 模式 + const workerInstance = new Worker(worker.url, { type: 'module' }); + engine = await CreateWebWorkerMLCEngine( + workerInstance, + modelId, + engineConfig, + chatOpts + ); + } else { + // 非 Worker 模式(主线程模式) + engine = await CreateMLCEngine(modelId, engineConfig, chatOpts) as any; + } + + return engine; + } + + /** + * 获取或创建 engine + * 如果已存在则直接返回,如果正在加载则等待,否则创建新的 + */ + async getOrCreateEngine(config: InitEngineConfig): Promise { + const { modelId } = config; + const cacheKey = this.getCacheKey(modelId); + + // 1. 检查是否已有缓存的 engine + const cached = this.engineCache.get(cacheKey); + if (cached) { + cached.refCount++; + return cached.engine; + } + + // 2. 检查是否正在加载 + const loadingPromise = this.loadingPromises.get(cacheKey); + if (loadingPromise) { + const engine = await loadingPromise; + // 加载完成后增加引用计数 + const entry = this.engineCache.get(cacheKey); + if (entry) { + entry.refCount++; + } + return engine; + } + + // 3. 
创建新的加载任务 + // 注意:即使模型文件已缓存,web-llm 在初始化时(加载到 GPU 等)也会触发进度回调 + const createPromise = (async () => { + try { + const engine = await this.createEngine(config); + + // 加载完成,存入缓存 + this.engineCache.set(cacheKey, { + engine, + refCount: 1, + }); + + return engine; + } finally { + // 无论成功失败,都清除 loading promise + this.loadingPromises.delete(cacheKey); + } + })(); + + this.loadingPromises.set(cacheKey, createPromise); + return createPromise; + } + + /** + * 释放 engine 引用 + * 当引用计数为 0 时,可以选择卸载 engine + */ + async releaseEngine(modelId: string | string[], unload: boolean = false): Promise { + const cacheKey = this.getCacheKey(modelId); + const entry = this.engineCache.get(cacheKey); + + if (!entry) { + return; + } + + entry.refCount--; + + // 当没有组件使用时,可以选择卸载 + if (entry.refCount <= 0 && unload) { + try { + if (entry.engine && typeof entry.engine.unload === 'function') { + await entry.engine.unload(); + } + } catch (error) { + // 忽略卸载错误 + } + this.engineCache.delete(cacheKey); + } + } + + /** + * 检查 engine 是否已缓存 + */ + hasEngine(modelId: string | string[]): boolean { + const cacheKey = this.getCacheKey(modelId); + return this.engineCache.has(cacheKey); + } + + /** + * 检查 engine 是否正在加载 + */ + isLoading(modelId: string | string[]): boolean { + const cacheKey = this.getCacheKey(modelId); + return this.loadingPromises.has(cacheKey); + } + + /** + * 获取缓存的 engine(不增加引用计数) + */ + getEngine(modelId: string | string[]): WebWorkerMLCEngine | null { + const cacheKey = this.getCacheKey(modelId); + return this.engineCache.get(cacheKey)?.engine || null; + } + + /** + * 清除所有缓存(用于测试或完全重置) + */ + async clearAll(): Promise { + for (const [, entry] of this.engineCache) { + try { + if (entry.engine && typeof entry.engine.unload === 'function') { + await entry.engine.unload(); + } + } catch (error) { + // 忽略卸载错误 + } + } + this.engineCache.clear(); + this.loadingPromises.clear(); + } +} + +// 内部单例实例 +const engineManager = EngineManager.getInstance(); + +/** + * Tool Call 结果类型 + */ +export interface 
ToolCallResult { + call_id: string; + name: string; + arguments: string; + result: string; + status: 'success' | 'error' +} + +/** + * Tool Call 类型(用于回调) + */ +export interface ToolCall { + type: 'function_call'; + call_id: string; + name: string; + arguments: string; + status: string +} + +/** + * ClientAI Foundation Adapter + */ +export interface ClientAIAdapter

, S = Record> extends DefaultAdapter { + setEngine: (engine: WebWorkerMLCEngine | null) => void; + setLoading: (loading: boolean) => void; + setError: (error: string | null) => void; + setChats: (chats: Message[]) => void; + setIsGenerating: (isGenerating: boolean) => void; + setAbortController: (controller: AbortController | null) => void; + setMessages: (messages: WebLLMMessage[]) => void; + setInitProgress?: (progress: InitProgressReport | null) => void; + notifyError: (error: Error) => void; + notifyInitProgress?: (progress: any) => void; + notifyToolCall?: (toolCalls: ToolCall[], rawOutput: string) => void +} + +/** + * 根据浏览器语言获取默认的 system prompt + */ +function getDefaultSystemPrompt(): string { + if (typeof navigator !== 'undefined' && navigator.language) { + const lang = navigator.language.toLowerCase(); + if (lang.startsWith('zh')) { + return '你是一个有用的 AI 助手。使用中文回复用户。'; + } + } + return 'You are a helpful AI assistant. Reply to users in English.'; +} + +/** + * ClientAI Foundation + * 包含所有业务逻辑,不依赖 React/DOM + */ +export default class ClientAIFoundation extends BaseFoundation { + private messageIdCounter = 0; + + constructor(adapter: ClientAIAdapter) { + super({ ...ClientAIFoundation.defaultAdapter, ...adapter }); + } + + static get defaultAdapter() { + return { + ...BaseFoundation.defaultAdapter, + setEngine: () => {}, + setLoading: () => {}, + setError: () => {}, + setChats: () => {}, + setIsGenerating: () => {}, + setAbortController: () => {}, + setMessages: () => {}, + setInitProgress: () => {}, + notifyError: () => {}, + notifyInitProgress: () => {}, + notifyToolCall: () => {}, + }; + } + + /** + * 生成唯一消息 ID + */ + private generateId = (): string => { + return `msg_${Date.now()}_${++this.messageIdCounter}`; + }; + + /** + * 从 MessageContent 中提取文本内容 + */ + extractTextFromMessageContent = (messageContent: MessageContent): string => { + if (!messageContent.inputContents || messageContent.inputContents.length === 0) { + return ''; + } + + // 提取所有文本内容 + 
return messageContent.inputContents + .map((content) => { + if (content.type === 'text' || content.type === 'paragraph') { + return content.text || content.content || ''; + } + return ''; + }) + .join('') + .trim(); + }; + + /** + * 从 Message 中提取文本内容 + */ + private extractTextFromMessage = (message: Message): string => { + if (typeof message.content === 'string') { + return message.content; + } + if (Array.isArray(message.content)) { + // 提取所有文本内容 + let text = ''; + message.content.forEach((item: any) => { + if (item.type === 'message' && Array.isArray(item.content)) { + item.content.forEach((contentItem: any) => { + if (contentItem.type === 'input_text' || contentItem.type === 'output_text') { + text += (contentItem.text || ''); + } + }); + } else if (item.type === 'output_text') { + text += (item.text || ''); + } + }); + return text.trim(); + } + return ''; + }; + + /** + * 将 Message[] 转换为 WebLLMMessage[] + */ + private convertMessagesToWebLLM = (messages: Message[]): WebLLMMessage[] => { + return messages + .filter((msg) => msg.role === 'user' || msg.role === 'assistant') + .map((msg) => ({ + role: msg.role as 'user' | 'assistant', + content: this.extractTextFromMessage(msg), + })) + .filter((msg) => msg.content); // 过滤掉空内容 + }; + + /** + * 判断是否是 Qwen 系列模型 + */ + private isQwenModel(modelId: string | string[]): boolean { + const ids = Array.isArray(modelId) ? modelId : [modelId]; + return ids.some(id => id.toLowerCase().includes('qwen')); + } + + /** + * 构建 Qwen 风格的带 tools 的 system prompt + * 因为 webLLM 的 MLC 编译配置中 use_function_calling: false, + * 所以我们需要手动将 tools 信息注入到 system prompt 中 + */ + buildQwenSystemPromptWithTools = (originalPrompt: string, tools: any[]): string => { + const toolsJson = tools.map(t => JSON.stringify(t, null, 2)).join('\n'); + + return `${originalPrompt} + +# Tools + +You may call one or more functions to assist with the user query. 
+ +You are provided with function signatures within XML tags: + +${toolsJson} + + +For each function call, return a json object with function name and arguments within XML tags: + +{"name": , "arguments": } +`; + }; + + /** + * 解析 Qwen 输出中的 标签 + * 返回 Semi AIChatDialogue 兼容的 FunctionToolCall 格式 + */ + parseQwenToolCalls = (output: string, isStreaming: boolean = false): Array<{ + type: 'function_call'; + call_id: string; + name: string; + arguments: string; + status: string + }> | null => { + // 匹配已闭合的 ... 标签 + const closedRegex = /\s*([\s\S]*?)\s*<\/tool_call>/g; + const toolCalls: Array<{ + type: 'function_call'; + call_id: string; + name: string; + arguments: string; + status: string + }> = []; + + let match; + while ((match = closedRegex.exec(output)) !== null) { + try { + const parsed = JSON.parse(match[1].trim()); + toolCalls.push({ + type: 'function_call', + call_id: `call_${getUuidv4()}`, + name: parsed.name, + arguments: typeof parsed.arguments === 'object' + ? JSON.stringify(parsed.arguments) + : (parsed.arguments || '{}'), + status: isStreaming ? 'in_progress' : 'completed' + }); + } catch (e) { + // 解析失败,跳过这个 tool call + console.warn('Failed to parse tool call:', match[1]); + } + } + + // 检查是否有未闭合的 标签(正在生成中) + const openMatch = output.match(/(?![\s\S]*<\/tool_call>)([\s\S]*)$/); + if (openMatch && isStreaming) { + // 正在生成中的 tool call,尝试部分解析 + try { + const partialContent = openMatch[1].trim(); + if (partialContent) { + toolCalls.push({ + type: 'function_call', + call_id: `call_pending_${Date.now()}`, + name: '解析中...', + arguments: partialContent, + status: 'in_progress' + }); + } + } catch (e) { + // 忽略解析错误 + } + } + + return toolCalls.length > 0 ? 
toolCalls : null; + }; + + /** + * 移除 标签后的纯文本内容 + */ + getContentWithoutToolCalls = (output: string): string => { + return output + .replace(/[\s\S]*?<\/tool_call>/g, '') + .replace(/[\s\S]*$/, '') // 移除未闭合的 tool_call + .trim(); + }; + + /** + * 解析包含 标签的内容,转换为 AIChatDialogue 格式(支持流式) + */ + parseContentWithToolCalls = (text: string, isStreaming: boolean = false): Message['content'] => { + const contentItems: any[] = []; + + // 1. 先解析 tool calls + const toolCalls = this.parseQwenToolCalls(text, isStreaming); + + // 2. 获取移除 tool_call 后的文本 + const textWithoutToolCalls = this.getContentWithoutToolCalls(text); + + // 3. 解析 thinking 内容 + if (textWithoutToolCalls) { + const thinkingContent = this.parseThinkingContent(textWithoutToolCalls, isStreaming); + if (Array.isArray(thinkingContent)) { + contentItems.push(...thinkingContent); + } else if (thinkingContent) { + contentItems.push({ + type: 'message', + content: [{ type: 'output_text', text: thinkingContent }], + status: isStreaming ? 'in_progress' : 'completed', + }); + } + } + + // 4. 添加 tool calls + if (toolCalls && toolCalls.length > 0) { + contentItems.push(...toolCalls); + } + + return contentItems.length > 0 ? contentItems : text; + }; + + /** + * 解析包含 标签的内容,转换为 AIChatDialogue 格式(支持流式) + */ + parseThinkingContent = (text: string, isStreaming: boolean = false): Message['content'] => { + const contentItems: any[] = []; + + // 匹配已闭合的 ... 
标签 + const closedThinkRegex = /([\s\S]*?)<\/think>/gi; + const closedMatches = Array.from(text.matchAll(closedThinkRegex)); + + // 检查是否有未闭合的 标签(正在思考中) + const openThinkMatch = text.match(/(?![\s\S]*<\/think>)([\s\S]*)$/i); + const hasUnclosedThink = openThinkMatch !== null; + + // 如果没有任何 think 标签 + if (closedMatches.length === 0 && !hasUnclosedThink) { + return text; + } + + let lastIndex = 0; + + // 处理所有已闭合的 thinking 内容 + closedMatches.forEach((match) => { + const thinkContent = match[1].trim(); + const matchStart = match.index!; + const matchEnd = matchStart + match[0].length; + + // 添加 thinking 标签之前的文本(如果有) + if (matchStart > lastIndex) { + const beforeText = text.substring(lastIndex, matchStart).trim(); + if (beforeText) { + contentItems.push({ + type: 'message', + content: [{ type: 'output_text', text: beforeText }], + status: 'completed', + }); + } + } + + // 添加已完成的 thinking 块 + if (thinkContent) { + contentItems.push({ + type: 'reasoning', + status: 'completed', + summary: [{ type: 'summary_text', text: thinkContent }], + }); + } + + lastIndex = matchEnd; + }); + + // 处理未闭合的 thinking 内容(流式显示) + if (hasUnclosedThink && openThinkMatch) { + const unclosedStart = text.lastIndexOf(''); + + // 添加未闭合 think 标签之前的文本 + if (unclosedStart > lastIndex) { + const beforeText = text.substring(lastIndex, unclosedStart).trim(); + if (beforeText) { + contentItems.push({ + type: 'message', + content: [{ type: 'output_text', text: beforeText }], + status: 'completed', + }); + } + } + + // 提取正在生成的思考内容 + const thinkingContent = openThinkMatch[1].trim(); + contentItems.push({ + type: 'reasoning', + status: isStreaming ? 'in_progress' : 'completed', + summary: [{ type: 'summary_text', text: thinkingContent || '思考中...' }], + }); + } else { + // 添加最后一个闭合标签之后的文本 + const afterText = text.substring(lastIndex).trim(); + if (afterText) { + contentItems.push({ + type: 'message', + content: [{ type: 'output_text', text: afterText }], + status: isStreaming ? 
'in_progress' : 'completed', + }); + } + } + + return contentItems.length > 0 ? contentItems : text; + }; + + /** + * 初始化引擎(单例模式,多个组件共享同一个模型) + */ + initEngine = async (config: InitEngineConfig) => { + try { + this._adapter.setLoading(true); + this._adapter.setError(null); + this._adapter.setInitProgress?.(null); + + // 创建进度回调函数 + const progressCallback: InitProgressCallback = (report: InitProgressReport) => { + // 更新进度状态 + this._adapter.setInitProgress?.(report); + // 同时调用用户自定义的回调 + const originalCallback = config.engineConfig?.initProgressCallback; + if (originalCallback) { + originalCallback(report); + } + }; + + // 合并 engineConfig,添加进度回调 + // 注意:如果用户已经提供了 initProgressCallback,我们需要同时调用两个回调 + const originalCallback = config.engineConfig?.initProgressCallback; + const engineConfigWithProgress = { + ...config.engineConfig, + initProgressCallback: (report: InitProgressReport) => { + // 调用我们的进度回调 + progressCallback(report); + // 调用用户自定义的回调(如果存在) + if (originalCallback) { + originalCallback(report); + } + }, + }; + + // 使用 engineManager 获取或创建 engine(单例模式) + // 注意:即使 engine 已经缓存,web-llm 在初始化时(加载到 GPU 等)也会触发进度回调 + // 所以进度回调会在 createEngine 过程中被调用 + const engine = await engineManager.getOrCreateEngine({ + ...config, + engineConfig: engineConfigWithProgress, + }); + + this._adapter.setEngine(engine); + this._adapter.setLoading(false); + this._adapter.setInitProgress?.(null); + + // 初始化系统消息 + const props = this.getProps(); + const { systemPrompt, chatOpts } = props; + const modelId = config.modelId; // 使用传入的 modelId(已处理默认值) + let finalSystemPrompt = systemPrompt || getDefaultSystemPrompt(); + + // 如果有 tools 配置且是 Qwen 模型,将 tools 信息注入到 system prompt + // 因为 webLLM 的 MLC 编译配置中 use_function_calling: false + const chatOptsArray = Array.isArray(chatOpts) ? chatOpts : chatOpts ? 
[chatOpts] : []; + const firstChatOpts = chatOptsArray[0] || {}; + const { tools } = firstChatOpts as any; + + if (tools && tools.length > 0 && this.isQwenModel(modelId)) { + finalSystemPrompt = this.buildQwenSystemPromptWithTools(finalSystemPrompt, tools); + } + + const systemMessage: WebLLMMessage = { + role: 'system', + content: finalSystemPrompt, + }; + + // 获取现有的 messages(可能包含 defaultMessages) + const existingMessages = this._adapter.getState('messages') || []; + const existingWebLLMMessages = existingMessages.filter((msg: any) => + msg.role === 'user' || msg.role === 'assistant' + ); + + // 合并系统消息和现有消息 + this._adapter.setMessages([systemMessage, ...existingWebLLMMessages]); + } catch (error) { + const err = error instanceof Error ? error : new Error('Unknown error'); + this._adapter.setError(err.message); + this._adapter.setLoading(false); + this._adapter.setInitProgress?.(null); + this._adapter.notifyError(err); + throw err; + } + }; + + /** + * 处理流式聊天 + */ + handleStreamChat = async (messageContent: MessageContent) => { + const engine = this._adapter.getState('engine'); + const messages = this._adapter.getState('messages') || []; + const chats = this._adapter.getState('chats') || []; + const isGenerating = this._adapter.getState('isGenerating'); + + // 从 MessageContent 中提取文本 + let inputText = this.extractTextFromMessageContent(messageContent); + + if (!engine || !inputText || isGenerating) { + return; + } + + // 应用 onUserMessage 回调 + const props = this.getProps(); + if (props.onUserMessage) { + try { + const modifiedContent = props.onUserMessage(inputText, messages); + if (modifiedContent !== undefined && modifiedContent !== null) { + inputText = modifiedContent || inputText; + } + } catch (error) { + // 如果回调出错,使用原始输入 + console.warn('onUserMessage callback error:', error); + } + } + + // 创建 AbortController 用于停止生成 + const abortController = new AbortController(); + this._adapter.setAbortController(abortController); + + // 使用 chatInputToMessage 转换消息格式 + const 
userChatMessage: Message = { + ...chatInputToMessage(messageContent), + id: this.generateId(), + createdAt: Date.now(), + status: 'completed', + }; + // 更新用户消息内容为修改后的内容 + // chatInputToMessage 返回的 content 可能是数组格式,需要统一处理 + if (typeof userChatMessage.content === 'string') { + userChatMessage.content = inputText; + } else if (Array.isArray(userChatMessage.content)) { + // 如果是数组格式,需要更新数组中的文本内容 + const updatedContent = userChatMessage.content.map((item: any) => { + if (item.type === 'message' && Array.isArray(item.content)) { + // 分离文本项和其他项(图片、文件等) + const textItems: any[] = []; + const otherItems: any[] = []; + + item.content.forEach((contentItem: any) => { + if (contentItem.type === 'input_text') { + textItems.push(contentItem); + } else { + otherItems.push(contentItem); + } + }); + + // 将所有文本项合并为一个,使用修改后的文本 + const newContent = []; + if (textItems.length > 0 && inputText) { + newContent.push({ + type: 'input_text', + text: inputText, + }); + } + // 保留其他类型的项 + newContent.push(...otherItems); + + return { + ...item, + content: newContent, + }; + } + return item; + }); + userChatMessage.content = updatedContent; + } + + // 添加用户消息到 WebLLM 格式(历史记录中保存原始消息,不带 /no_think) + const userMessage: WebLLMMessage = { role: 'user', content: inputText }; + const updatedMessages = [...messages, userMessage]; + const updatedChats = [...chats, userChatMessage]; + + // 创建助手消息占位符(流式更新) + const assistantId = this.generateId(); + const assistantChatMessage: Message = { + role: 'assistant', + id: assistantId, + content: '', + createdAt: Date.now(), + status: 'in_progress', + }; + const chatsWithAssistant = [...updatedChats, assistantChatMessage]; + + this._adapter.setMessages(updatedMessages); + this._adapter.setChats(chatsWithAssistant); + this._adapter.setIsGenerating(true); + + try { + // 应用 beforeAIInput 回调 + let customResponse: string | undefined; + if (props.beforeAIInput) { + try { + const result = await props.beforeAIInput(updatedMessages); + if (result && result.trim() !== '') { + 
customResponse = result; + } + } catch (error) { + // 如果回调出错,继续正常调用AI + console.warn('beforeAIInput callback error:', error); + } + } + + // 如果 beforeAIInput 返回了自定义回复,跳过AI调用 + if (customResponse !== undefined) { + // 获取配置以判断是否需要解析 tool calls + const { modelId, chatOpts } = props; + const isQwen = modelId ? this.isQwenModel(modelId) : false; + const chatOptsArray = Array.isArray(chatOpts) ? chatOpts : chatOpts ? [chatOpts] : []; + const firstChatOpts = chatOptsArray[0] || {}; + const { tools } = firstChatOpts as any; + const hasTools = tools && tools.length > 0; + + // 解析自定义回复内容 + const hasToolCallInReply = customResponse.includes(''); + const parsedContent = (hasToolCallInReply && hasTools && isQwen) + ? this.parseContentWithToolCalls(customResponse, false) + : this.parseThinkingContent(customResponse, false); + + // 更新助手消息为完成状态 + const finalChats = this._adapter.getState('chats') || []; + const updatedFinalChats = [...finalChats]; + const assistantIndex = updatedFinalChats.findIndex((msg) => msg.id === assistantId); + if (assistantIndex !== -1) { + updatedFinalChats[assistantIndex] = { + ...updatedFinalChats[assistantIndex], + content: parsedContent, + status: 'completed', + }; + } + + this._adapter.setMessages([...updatedMessages, { role: 'assistant', content: customResponse }]); + this._adapter.setChats(updatedFinalChats); + this._adapter.setIsGenerating(false); + this._adapter.setAbortController(null); + return; + } + + // 从 props 获取配置,判断是否需要特殊处理 tool calling + const { modelId, chatOpts } = props; + const isQwen = modelId ? this.isQwenModel(modelId) : false; + + // 获取 tools 配置(用于判断是否需要解析 tool_call 输出) + const chatOptsArray = Array.isArray(chatOpts) ? chatOpts : chatOpts ? 
[chatOpts] : []; + const firstChatOpts = chatOptsArray[0] || {}; + const { tools } = firstChatOpts as any; + const hasTools = tools && tools.length > 0; + + // 获取深度思考开关状态(从 state 中获取,因为这是由组件内部管理的状态) + const enableDeepThink = this._adapter.getState('enableDeepThink'); + + // 创建发送给模型的消息列表 + // 如果关闭深度思考,在最后一条用户消息末尾添加 /no_think 标签(Qwen3 模型支持) + // 注意:这里需要深拷贝消息列表,避免修改历史记录 + const messagesForRequest = updatedMessages.map((msg, index) => { + // 只处理最后一条用户消息 + if (index === updatedMessages.length - 1 && msg.role === 'user' && enableDeepThink === false) { + return { + ...msg, + content: `${msg.content} /no_think` + }; + } + return msg; + }); + + // 创建流式聊天请求 + // 注意:对于 Qwen 模型,不传 tools 参数给 webLLM(因为 webLLM 会报 UnsupportedModelIdError) + // tools 信息已经在 initEngine 时注入到 system prompt 中了 + const requestParams: any = { + messages: messagesForRequest, + temperature: 1, + stream: true, + stream_options: { include_usage: true }, + }; + + const chunks = await engine.chat.completions.create(requestParams); + + // 获取 stream 配置 + const streamEnabled = props.stream !== false; // 默认为 true + + let reply = ''; + for await (const chunk of chunks) { + // 检查是否被中止 + if (abortController.signal.aborted) { + break; + } + + const deltaContent = chunk.choices[0]?.delta.content || ''; + if (deltaContent) { + reply += deltaContent; + + // 如果 stream 为 false,不更新UI,只收集内容 + if (streamEnabled) { + // 检测是否包含特殊标签(),流式解析 + let displayContent: string | any[] = reply; + const hasThinkTag = reply.includes(''); + const hasToolCallTag = reply.includes(''); + + if (hasThinkTag || (hasToolCallTag && hasTools && isQwen)) { + // 使用流式解析,支持显示未闭合的内容 + try { + const parsed = hasToolCallTag && hasTools && isQwen + ? 
this.parseContentWithToolCalls(reply, true) + : this.parseThinkingContent(reply, true); + if (Array.isArray(parsed)) { + displayContent = parsed; + } + } catch (e) { + // 解析失败时继续使用原始文本 + displayContent = reply; + } + } + + // 更新助手消息内容 + const currentChats = this._adapter.getState('chats') || []; + const updatedChatsWithReply = [...currentChats]; + const assistantIndex = updatedChatsWithReply.findIndex((msg) => msg.id === assistantId); + if (assistantIndex !== -1) { + updatedChatsWithReply[assistantIndex] = { + ...updatedChatsWithReply[assistantIndex], + content: displayContent, + status: 'in_progress', + }; + } + this._adapter.setChats(updatedChatsWithReply); + } + } + } + + // 获取完整回复 + const fullReply = await engine.getMessage(); + let finalReply = reply || fullReply; + + // 应用 afterAIInput 回调 + if (props.afterAIInput) { + try { + const modifiedContent = await props.afterAIInput(finalReply, [...updatedMessages, { role: 'assistant', content: finalReply }]); + if (modifiedContent !== undefined && modifiedContent !== null) { + finalReply = modifiedContent; + } + } catch (error) { + // 如果回调出错,使用原始回复 + console.warn('afterAIInput callback error:', error); + } + } + + // 解析内容并转换为 AIChatDialogue 格式(非流式,标记为完成) + // 如果有 tools 且是 Qwen 模型,使用 parseContentWithToolCalls 同时处理 thinking 和 tool_calls + const hasToolCallInReply = finalReply.includes(''); + const parsedContent = (hasToolCallInReply && hasTools && isQwen) + ? this.parseContentWithToolCalls(finalReply, false) + : this.parseThinkingContent(finalReply, false); + + // 如果 stream 为 false,现在更新UI + if (!streamEnabled) { + const currentChats = this._adapter.getState('chats') || []; + const updatedChatsWithReply = [...currentChats]; + const assistantIndex = updatedChatsWithReply.findIndex((msg) => msg.id === assistantId); + if (assistantIndex !== -1) { + updatedChatsWithReply[assistantIndex] = { + ...updatedChatsWithReply[assistantIndex], + content: parsedContent, + status: abortController.signal.aborted ? 
'cancelled' : 'completed', + }; + } + this._adapter.setChats(updatedChatsWithReply); + } + + // 更新助手消息为完成状态(仅在 stream=true 时执行,stream=false 时已在上面更新) + if (streamEnabled) { + const finalChats = this._adapter.getState('chats') || []; + const updatedFinalChats = [...finalChats]; + const assistantIndex = updatedFinalChats.findIndex((msg) => msg.id === assistantId); + if (assistantIndex !== -1) { + updatedFinalChats[assistantIndex] = { + ...updatedFinalChats[assistantIndex], + content: parsedContent, + status: abortController.signal.aborted ? 'cancelled' : 'completed', + }; + } + this._adapter.setChats(updatedFinalChats); + } else { + // stream=false 时,只需要更新状态为完成(内容已在上面更新) + const finalChats = this._adapter.getState('chats') || []; + const updatedFinalChats = [...finalChats]; + const assistantIndex = updatedFinalChats.findIndex((msg) => msg.id === assistantId); + if (assistantIndex !== -1) { + updatedFinalChats[assistantIndex] = { + ...updatedFinalChats[assistantIndex], + status: abortController.signal.aborted ? 'cancelled' : 'completed', + }; + } + this._adapter.setChats(updatedFinalChats); + } + + this._adapter.setMessages([...updatedMessages, { role: 'assistant', content: finalReply }]); + this._adapter.setIsGenerating(false); + this._adapter.setAbortController(null); + + // 如果检测到 tool calls,处理工具调用 + if (hasToolCallInReply && hasTools && isQwen) { + const toolCalls = this.parseQwenToolCalls(finalReply, false); + if (toolCalls && toolCalls.length > 0) { + const props = this.getProps(); + + // 优先使用 handleToolCall(方案2:自动处理) + if (props.handleToolCall) { + try { + const toolResults = await props.handleToolCall(toolCalls, finalReply); + if (toolResults && toolResults.length > 0) { + // 自动发送工具执行结果 + await this.sendToolResults(toolResults); + } + } catch (error) { + // 如果 handleToolCall 出错,显示错误但不中断流程 + const err = error instanceof Error ? 
error : new Error('工具调用处理失败'); + this._adapter.setError(err.message); + this._adapter.notifyError(err); + } + } else { + // 向后兼容:如果没有 handleToolCall,使用 onToolCall(方案1:手动处理) + this._adapter.notifyToolCall?.(toolCalls, finalReply); + } + } + } + } catch (error) { + // 更新助手消息为失败状态 + const errorChats = this._adapter.getState('chats') || []; + const updatedErrorChats = [...errorChats]; + const assistantIndex = updatedErrorChats.findIndex((msg) => msg.id === assistantId); + if (assistantIndex !== -1) { + updatedErrorChats[assistantIndex] = { + ...updatedErrorChats[assistantIndex], + status: 'failed', + }; + } + + const err = error instanceof Error ? error : new Error('聊天出错'); + this._adapter.setError(err.message); + this._adapter.setChats(updatedErrorChats); + this._adapter.setIsGenerating(false); + this._adapter.setAbortController(null); + this._adapter.notifyError(err); + } + }; + + /** + * 发送 Tool 执行结果,让 AI 继续对话 + * @param toolResults Tool 执行结果数组 + */ + sendToolResults = async (toolResults: ToolCallResult[]) => { + const engine = this._adapter.getState('engine'); + const messages = this._adapter.getState('messages') || []; + const chats = this._adapter.getState('chats') || []; + const isGenerating = this._adapter.getState('isGenerating'); + + if (!engine || isGenerating || toolResults.length === 0) { + return; + } + + // 创建 AbortController + const abortController = new AbortController(); + this._adapter.setAbortController(abortController); + + // 构建 tool response 消息(Qwen 格式) + const toolResponseContent = toolResults.map(result => + `\n${result.result}\n` + ).join('\n'); + + // 添加 tool response 到消息历史 + const toolResponseMessage: WebLLMMessage = { + role: 'user', // Qwen chat_template 中 tool response 是作为 user 消息的一部分 + content: toolResponseContent + }; + const updatedMessages = [...messages, toolResponseMessage]; + + // 创建 tool response 的 chat 消息(用于显示) + const toolResponseChatId = this.generateId(); + const toolResponseChat: Message = { + role: 'assistant', // 在 UI 
上显示为助手消息的一部分 + id: toolResponseChatId, + content: toolResults.map(result => ({ + type: 'function_call', + call_id: result.call_id, + name: result.name, + arguments: result.arguments, + result: result.result, + status: result.status === 'success' ? 'completed' : 'failed' + })), + createdAt: Date.now(), + status: 'completed', + }; + + // 创建助手回复占位符 + const assistantId = this.generateId(); + const assistantChatMessage: Message = { + role: 'assistant', + id: assistantId, + content: '', + createdAt: Date.now(), + status: 'in_progress', + }; + + const updatedChats = [...chats, assistantChatMessage]; + + this._adapter.setMessages(updatedMessages); + this._adapter.setChats(updatedChats); + this._adapter.setIsGenerating(true); + + try { + const props = this.getProps(); + + // 应用 beforeAIInput 回调 + let customResponse: string | undefined; + if (props.beforeAIInput) { + try { + const result = await props.beforeAIInput(updatedMessages); + if (result && result.trim() !== '') { + customResponse = result; + } + } catch (error) { + // 如果回调出错,继续正常调用AI + console.warn('beforeAIInput callback error:', error); + } + } + + // 如果 beforeAIInput 返回了自定义回复,跳过AI调用 + if (customResponse !== undefined) { + // 获取配置以判断是否需要解析 tool calls + const { modelId, chatOpts } = props; + const isQwen = modelId ? this.isQwenModel(modelId) : false; + const chatOptsArray = Array.isArray(chatOpts) ? chatOpts : chatOpts ? [chatOpts] : []; + const firstChatOpts = chatOptsArray[0] || {}; + const { tools } = firstChatOpts as any; + const hasTools = tools && tools.length > 0; + + // 解析自定义回复内容 + const hasToolCallInReply = customResponse.includes(''); + const parsedContent = (hasToolCallInReply && hasTools && isQwen) + ? 
this.parseContentWithToolCalls(customResponse, false) + : this.parseThinkingContent(customResponse, false); + + // 更新助手消息为完成状态 + const finalChats = this._adapter.getState('chats') || []; + const updatedFinalChats = [...finalChats]; + const assistantIndex = updatedFinalChats.findIndex((msg) => msg.id === assistantId); + if (assistantIndex !== -1) { + updatedFinalChats[assistantIndex] = { + ...updatedFinalChats[assistantIndex], + content: parsedContent, + status: 'completed', + }; + } + + this._adapter.setMessages([...updatedMessages, { role: 'assistant', content: customResponse }]); + this._adapter.setChats(updatedFinalChats); + this._adapter.setIsGenerating(false); + this._adapter.setAbortController(null); + return; + } + + const { modelId, chatOpts } = props; + const isQwen = modelId ? this.isQwenModel(modelId) : false; + const chatOptsArray = Array.isArray(chatOpts) ? chatOpts : chatOpts ? [chatOpts] : []; + const firstChatOpts = chatOptsArray[0] || {}; + const { tools } = firstChatOpts as any; + const hasTools = tools && tools.length > 0; + + // 获取深度思考开关状态 + const enableDeepThink = this._adapter.getState('enableDeepThink'); + + // 创建发送给模型的消息列表 + // 如果关闭深度思考,在最后一条用户消息末尾添加 /no_think 标签 + // 注意:这里需要深拷贝消息列表,避免修改历史记录 + const messagesForRequest = updatedMessages.map((msg, index) => { + // 只处理最后一条用户消息(tool response) + if (index === updatedMessages.length - 1 && msg.role === 'user' && enableDeepThink === false) { + return { + ...msg, + content: `${msg.content} /no_think` + }; + } + return msg; + }); + + const requestParams: any = { + messages: messagesForRequest, + temperature: 1, + stream: true, + stream_options: { include_usage: true }, + }; + + const chunks = await engine.chat.completions.create(requestParams); + + // 获取 stream 配置 + const streamEnabled = props.stream !== false; // 默认为 true + + let reply = ''; + for await (const chunk of chunks) { + if (abortController.signal.aborted) { + break; + } + + const deltaContent = chunk.choices[0]?.delta.content || ''; + if 
(deltaContent) { + reply += deltaContent; + + // 如果 stream 为 false,不更新UI,只收集内容 + if (streamEnabled) { + let displayContent: string | any[] = reply; + const hasThinkTag = reply.includes(''); + const hasToolCallTag = reply.includes(''); + + if (hasThinkTag || (hasToolCallTag && hasTools && isQwen)) { + try { + const parsed = hasToolCallTag && hasTools && isQwen + ? this.parseContentWithToolCalls(reply, true) + : this.parseThinkingContent(reply, true); + if (Array.isArray(parsed)) { + displayContent = parsed; + } + } catch (e) { + displayContent = reply; + } + } + + const currentChats = this._adapter.getState('chats') || []; + const updatedChatsWithReply = [...currentChats]; + const assistantIndex = updatedChatsWithReply.findIndex((msg) => msg.id === assistantId); + if (assistantIndex !== -1) { + updatedChatsWithReply[assistantIndex] = { + ...updatedChatsWithReply[assistantIndex], + content: displayContent, + status: 'in_progress', + }; + } + this._adapter.setChats(updatedChatsWithReply); + } + } + } + + const fullReply = await engine.getMessage(); + let finalReply = reply || fullReply; + + // 应用 afterAIInput 回调 + if (props.afterAIInput) { + try { + const modifiedContent = await props.afterAIInput(finalReply, [...updatedMessages, { role: 'assistant', content: finalReply }]); + if (modifiedContent !== undefined && modifiedContent !== null) { + finalReply = modifiedContent; + } + } catch (error) { + // 如果回调出错,使用原始回复 + console.warn('afterAIInput callback error:', error); + } + } + + const hasToolCallInReply = finalReply.includes(''); + const parsedContent = (hasToolCallInReply && hasTools && isQwen) + ? 
this.parseContentWithToolCalls(finalReply, false) + : this.parseThinkingContent(finalReply, false); + + // 如果 stream 为 false,现在更新UI + if (!streamEnabled) { + const currentChats = this._adapter.getState('chats') || []; + const updatedChatsWithReply = [...currentChats]; + const assistantIndex = updatedChatsWithReply.findIndex((msg) => msg.id === assistantId); + if (assistantIndex !== -1) { + updatedChatsWithReply[assistantIndex] = { + ...updatedChatsWithReply[assistantIndex], + content: parsedContent, + status: abortController.signal.aborted ? 'cancelled' : 'completed', + }; + } + this._adapter.setChats(updatedChatsWithReply); + } + + // 更新助手消息为完成状态(仅在 stream=true 时执行,stream=false 时已在上面更新) + if (streamEnabled) { + const finalChats = this._adapter.getState('chats') || []; + const updatedFinalChats = [...finalChats]; + const assistantIndex = updatedFinalChats.findIndex((msg) => msg.id === assistantId); + if (assistantIndex !== -1) { + updatedFinalChats[assistantIndex] = { + ...updatedFinalChats[assistantIndex], + content: parsedContent, + status: abortController.signal.aborted ? 'cancelled' : 'completed', + }; + } + this._adapter.setChats(updatedFinalChats); + } else { + // stream=false 时,只需要更新状态为完成(内容已在上面更新) + const finalChats = this._adapter.getState('chats') || []; + const updatedFinalChats = [...finalChats]; + const assistantIndex = updatedFinalChats.findIndex((msg) => msg.id === assistantId); + if (assistantIndex !== -1) { + updatedFinalChats[assistantIndex] = { + ...updatedFinalChats[assistantIndex], + status: abortController.signal.aborted ? 
'cancelled' : 'completed', + }; + } + this._adapter.setChats(updatedFinalChats); + } + + this._adapter.setMessages([...updatedMessages, { role: 'assistant', content: finalReply }]); + this._adapter.setIsGenerating(false); + this._adapter.setAbortController(null); + + // 如果还有 tool calls,继续处理工具调用 + if (hasToolCallInReply && hasTools && isQwen) { + const toolCalls = this.parseQwenToolCalls(finalReply, false); + if (toolCalls && toolCalls.length > 0) { + const props = this.getProps(); + + // 优先使用 handleToolCall(方案2:自动处理) + if (props.handleToolCall) { + try { + const toolResults = await props.handleToolCall(toolCalls, finalReply); + if (toolResults && toolResults.length > 0) { + // 自动发送工具执行结果(递归调用,支持多轮工具调用) + await this.sendToolResults(toolResults); + } + } catch (error) { + // 如果 handleToolCall 出错,显示错误但不中断流程 + const err = error instanceof Error ? error : new Error('工具调用处理失败'); + this._adapter.setError(err.message); + this._adapter.notifyError(err); + } + } else { + // 向后兼容:如果没有 handleToolCall,使用 onToolCall(方案1:手动处理) + this._adapter.notifyToolCall?.(toolCalls, finalReply); + } + } + } + } catch (error) { + const errorChats = this._adapter.getState('chats') || []; + const updatedErrorChats = [...errorChats]; + const assistantIndex = updatedErrorChats.findIndex((msg) => msg.id === assistantId); + if (assistantIndex !== -1) { + updatedErrorChats[assistantIndex] = { + ...updatedErrorChats[assistantIndex], + status: 'failed', + }; + } + + const err = error instanceof Error ? 
error : new Error('聊天出错'); + this._adapter.setError(err.message); + this._adapter.setChats(updatedErrorChats); + this._adapter.setIsGenerating(false); + this._adapter.setAbortController(null); + this._adapter.notifyError(err); + } + }; + + /** + * 停止生成 + */ + stopGenerate = () => { + const abortController = this._adapter.getState('abortController'); + if (abortController) { + abortController.abort(); + } + this._adapter.setIsGenerating(false); + this._adapter.setAbortController(null); + }; + + /** + * 清理资源 + * @param modelId 模型ID,用于释放引用计数 + * @param unloadWhenNoRef 当引用计数为0时是否卸载模型,默认 false + */ + destroy = async (modelId?: string | string[], unloadWhenNoRef: boolean = false) => { + // 释放 engine 引用 + if (modelId) { + await engineManager.releaseEngine(modelId, unloadWhenNoRef); + } + this._adapter.setEngine(null); + this._adapter.setAbortController(null); + }; +} + diff --git a/packages/semi-foundation/clientAI/interface.ts b/packages/semi-foundation/clientAI/interface.ts new file mode 100644 index 0000000000..32447a8321 --- /dev/null +++ b/packages/semi-foundation/clientAI/interface.ts @@ -0,0 +1,168 @@ +/** + * ClientAI 组件类型定义 + * 从 @mlc-ai/web-llm 重新导出类型,方便用户查阅 web-llm 文档 + */ + +// 重新导出 Tool Calling 相关类型 +export type { ToolCall, ToolCallResult } from './foundation'; + +// 重新导出 web-llm 的核心类型 +export type { + // Engine 相关 + WebWorkerMLCEngine, + MLCEngineInterface, + MLCEngineConfig, + // Config 相关 + AppConfig, + ModelRecord, + ChatOptions, + ChatConfig, + GenerationConfig, + ModelType, + // Callback 相关 + InitProgressCallback, + InitProgressReport, + // OpenAI API 相关 + ChatCompletion, + ChatCompletionChunk, + ChatCompletionRequestBase, + ChatCompletionRequestStreaming, + ChatCompletionRequestNonStreaming, + Completion, + CompletionCreateParamsBase, + CompletionCreateParamsStreaming, + CompletionCreateParamsNonStreaming, + CreateEmbeddingResponse, + EmbeddingCreateParams, +} from '@mlc-ai/web-llm'; + +// 重新导出 web-llm 的函数(类型) +export type { + CreateWebWorkerMLCEngine, 
+} from '@mlc-ai/web-llm'; + +// 导入 AIChatDialogue 和 AIChatInput 的 Props 类型 +// 注意:这些类型在 UI 层定义,Foundation 层只做类型引用 +// 实际使用时会在 UI 层导入 + +/** + * Worker 配置 + */ +export interface WorkerConfig { + /** + * Worker URL,如果提供则使用该 URL 创建 Worker + */ + url?: string; + /** + * 是否启用 Worker,默认 true(如果提供了 url) + */ + enabled?: boolean +} + +/** + * ClientAI 组件 Props + */ +export interface ClientAIProps { + /** + * Worker 配置 + */ + worker?: WorkerConfig; + + /** + * 模型 ID,必填 + * 当 engineConfig.appConfig.model_list 包含多个模型时,用于指定使用哪个模型 + * 可以从引擎配置中获取,如:ClientAI.Qwen3_1_7B_EngineConfig.appConfig.model_list[0].model_id + */ + modelId?: string | string[]; + + /** + * 引擎配置,必填 + * 国际用户使用 ClientAI.Qwen3_1_7B_EngineConfig + * 中国大陆用户使用 ClientAI.Qwen3_1_7B_EngineConfigCN + */ + engineConfig?: MLCEngineConfig; + + /** + * 聊天选项,覆盖模型默认配置 + */ + chatOpts?: ChatOptions | ChatOptions[]; + + /** + * 系统提示词 + */ + systemPrompt?: string; + + /** + * 错误回调 + */ + onError?: (error: Error) => void; + + /** + * AIChatDialogue 的透传 props + * 类型定义在 UI 层 + */ + dialogueProps?: any; + + /** + * AIChatInput 的透传 props + * 类型定义在 UI 层 + */ + inputProps?: any; + + /** + * 组件基础 props + */ + className?: string; + style?: any; + + /** + * 用户消息发送前的回调 + * 可以修改用户输入内容,返回的字符串将同时用于显示和发送给AI + * @param userContent 用户输入的原始内容 + * @param messages 完整的消息历史数组 + * @returns 修改后的用户内容 + */ + onUserMessage?: (userContent: string, messages: any[]) => string; + + /** + * AI回复前的回调 + * 可以拦截AI调用并返回自定义回复 + * @param messages 完整的消息历史数组(包含最新的用户消息) + * @returns 非空字符串将作为AI回复,空字符串则正常调用AI + */ + beforeAIInput?: (messages: any[]) => string | Promise; + + /** + * AI回复后的回调 + * 可以修改AI的回复内容 + * @param aiContent AI返回的原始内容 + * @param messages 完整的消息历史数组(包含AI回复) + * @returns 修改后的AI回复内容 + */ + afterAIInput?: (aiContent: string, messages: any[]) => string | Promise; + + /** + * 控制是否流式显示AI回复 + * @default true + * 当为 false 时,等待流式返回完毕后才一次性显示 + */ + stream?: boolean; + + /** + * 默认对话消息 + * 用于设置初始的对话历史,组件加载时会显示这些消息 + */ + defaultMessages?: any[]; + + /** + * Tool 
调用处理函数 + * 当 AI 输出包含 tool_call 时,组件会自动调用此函数并等待返回结果,然后自动发送结果继续对话 + * 如果提供了此函数,将优先使用此函数;否则会调用 onToolCall 回调(需要手动调用 sendToolResults) + * @param toolCalls 解析出的 tool calls 数组 + * @param rawOutput AI 的原始输出 + * @returns 返回 Tool 执行结果数组,组件会自动发送这些结果继续对话 + */ + handleToolCall?: (toolCalls: ToolCall[], rawOutput: string) => Promise | ToolCallResult[] +} + + diff --git a/packages/semi-foundation/clientAI/variables.scss b/packages/semi-foundation/clientAI/variables.scss new file mode 100644 index 0000000000..0361021f3e --- /dev/null +++ b/packages/semi-foundation/clientAI/variables.scss @@ -0,0 +1,25 @@ +// ClientAI component variables + +// Spacing +$spacing-clientAI-wrapper-padding: 20px; // 主容器内边距 +$spacing-clientAI-loading-paddingY: 40px; // 加载状态垂直内边距 +$spacing-clientAI-loading-paddingX: 20px; // 加载状态水平内边距 +$spacing-clientAI-loading-gap: 24px; // 加载状态间距 +$spacing-clientAI-loading-content-gap: 12px; // 加载内容间距 +$spacing-clientAI-error-padding: 20px; // 错误状态内边距 +$spacing-clientAI-input-wrapper-margin: 12px; // 输入框外层外边距 +$spacing-clientAI-input-edit-marginY: 12px; // 编辑输入框垂直外边距 +$spacing-clientAI-input-edit-marginX: 0px; // 编辑输入框水平外边距 + +// Width/Height +$width-clientAI-wrapper-max: 1400px; // 主容器最大宽度 +$height-clientAI-wrapper-min: 600px; // 主容器最小高度 +$height-clientAI-loading-min: 400px; // 加载状态最小高度 +$width-clientAI-loading-content-max: 400px; // 加载内容最大宽度 +$height-clientAI-input-wrapper-min: 150px; // 输入框外层最小高度 +$height-clientAI-input-wrapper-max: 300px; // 输入框外层最大高度 +$height-clientAI-input-edit-max: 300px; // 编辑输入框最大高度 + +// Font +$font-clientAI-loading-text-fontSize: 16px; // 加载文本字体大小 + diff --git a/packages/semi-foundation/package.json b/packages/semi-foundation/package.json index eda0fe8b3d..db88c8ee65 100644 --- a/packages/semi-foundation/package.json +++ b/packages/semi-foundation/package.json @@ -3,8 +3,22 @@ "version": "2.90.0", "description": "", "scripts": { + "clean": "rimraf lib", "build:lib": "node ./scripts/compileLib.js", - "prepublishOnly": "npm run build:lib" + 
"prepublishOnly": "npm run clean && npm run build:lib" + }, + "files": [ + "lib/*", + "**/*.ts", + "**/*.scss", + "!**/node_modules/**", + "!**/*.test.ts", + "!**/*.spec.ts" + ], + "typesVersions": { + "*": { + "*": ["lib/es/*"] + } }, "dependencies": { "@douyinfe/semi-animation": "2.90.0", @@ -20,7 +34,8 @@ "memoize-one": "^5.2.1", "prismjs": "^1.29.0", "remark-gfm": "^4.0.0", - "scroll-into-view-if-needed": "^2.2.24" + "scroll-into-view-if-needed": "^2.2.24", + "@mlc-ai/web-llm": "^0.2.80" }, "keywords": [], "author": "", @@ -38,6 +53,7 @@ "@types/prismjs": "^1.26.3", "babel-plugin-lodash": "^3.3.4", "del": "^6.0.0", + "rimraf": "^3.0.2", "esbuild": "0.24.0", "gulp": "^4.0.2", "gulp-babel": "^8.0.0", diff --git a/packages/semi-ui/clientAI/index.tsx b/packages/semi-ui/clientAI/index.tsx new file mode 100644 index 0000000000..27ad9658aa --- /dev/null +++ b/packages/semi-ui/clientAI/index.tsx @@ -0,0 +1,534 @@ +import React from 'react'; +import BaseComponent from '../_base/baseComponent'; +import PropTypes from 'prop-types'; +import cls from 'classnames'; +import Spin from '../spin'; +import Typography from '../typography'; +import Progress from '../progress'; +import AIChatDialogue from '../aiChatDialogue'; +import AIChatInput from '../aiChatInput'; +import { IconBulb } from '@douyinfe/semi-icons'; +import ClientAIFoundation, { ClientAIAdapter, ToolCall, ToolCallResult } from '@douyinfe/semi-foundation/clientAI/foundation'; +import { ClientAIProps, ClientAIState, ClientAIRenderProps } from './interface'; +import { cssClasses } from '@douyinfe/semi-foundation/clientAI/constants'; +import type { RoleConfig } from '../aiChatDialogue/interface'; +import LocaleConsumer from '../locale/localeConsumer'; +import { Locale } from '../locale/interface'; +import { + Qwen3_1_7B_ENGINE_CONFIG, + Qwen3_1_7B_ENGINE_CONFIG_CN, + Qwen3_4B_ENGINE_CONFIG, + Qwen3_4B_ENGINE_CONFIG_CN, +} from '@douyinfe/semi-foundation/clientAI/constants'; +import type { + WebWorkerMLCEngine, + 
MLCEngineConfig, + InitProgressReport, +} from '@douyinfe/semi-foundation/clientAI/interface'; +import { MessageContent } from '@douyinfe/semi-foundation/aiChatInput/interface'; +import { Message } from '@douyinfe/semi-foundation/aiChatDialogue/foundation'; +import chatInputToMessage from '@douyinfe/semi-foundation/aiChatDialogue/dataAdapter/chatInputToMessage'; +import '@douyinfe/semi-foundation/clientAI/clientAI.scss'; + +const { Configure } = AIChatInput; + +const prefixCls = cssClasses.PREFIX; + +class ClientAI extends BaseComponent { + static __SemiComponentName__ = 'ClientAI'; + + foundation!: ClientAIFoundation; + + // 静态属性:引擎配置 + // 国外配置(使用 Hugging Face + GitHub Raw) + static Qwen3_1_7B_EngineConfig: MLCEngineConfig = Qwen3_1_7B_ENGINE_CONFIG; + static Qwen3_4B_EngineConfig: MLCEngineConfig = Qwen3_4B_ENGINE_CONFIG; + + // 中国配置(使用 ModelScope + jsDelivr CDN) + static Qwen3_1_7B_EngineConfigCN: MLCEngineConfig = Qwen3_1_7B_ENGINE_CONFIG_CN; + static Qwen3_4B_EngineConfigCN: MLCEngineConfig = Qwen3_4B_ENGINE_CONFIG_CN; + + static propTypes = { + worker: PropTypes.shape({ + url: PropTypes.string, + enabled: PropTypes.bool, + }), + modelId: PropTypes.oneOfType([PropTypes.string, PropTypes.arrayOf(PropTypes.string)]).isRequired, + engineConfig: PropTypes.object.isRequired, + chatOpts: PropTypes.oneOfType([PropTypes.object, PropTypes.arrayOf(PropTypes.object)]), + systemPrompt: PropTypes.string, + onError: PropTypes.func, + dialogueProps: PropTypes.object, + inputProps: PropTypes.object, + roleConfig: PropTypes.object, + className: PropTypes.string, + style: PropTypes.object, + showDeepThinkButton: PropTypes.bool, + defaultEnableDeepThink: PropTypes.bool, + render: PropTypes.func, + onUserMessage: PropTypes.func, + beforeAIInput: PropTypes.func, + afterAIInput: PropTypes.func, + stream: PropTypes.bool, + defaultMessages: PropTypes.array, + onToolCall: PropTypes.func, + handleToolCall: PropTypes.func, + }; + + static defaultProps = { + worker: { + enabled: true, + 
}, + // 注意:modelId 和 engineConfig 必填 + // modelId 可以从引擎配置中获取,如:ClientAI.Qwen3_1_7B_EngineConfig.appConfig.model_list[0].model_id + // engineConfig 使用 ClientAI.Qwen3_1_7B_EngineConfig(国外)或 ClientAI.Qwen3_1_7B_EngineConfigCN(中国) + systemPrompt: undefined, // 使用 undefined 让 foundation 层根据浏览器语言动态设置 + showDeepThinkButton: false, + defaultEnableDeepThink: true, + stream: true, + }; + + constructor(props: ClientAIProps) { + super(props); + + // 处理 defaultMessages + const { defaultMessages } = props; + let initialChats: Message[] = []; + let initialMessages: any[] = []; + + if (defaultMessages && defaultMessages.length > 0) { + initialChats = defaultMessages.map((msg) => ({ + ...msg, + id: msg.id || `msg_${Date.now()}_${Math.random()}`, + createdAt: msg.createdAt || Date.now(), + status: msg.status || 'completed', + })); + + // 转换为 WebLLM 格式 + initialMessages = initialChats + .filter((msg) => msg.role === 'user' || msg.role === 'assistant') + .map((msg) => { + let content = ''; + if (typeof msg.content === 'string') { + content = msg.content; + } else if (Array.isArray(msg.content)) { + msg.content.forEach((item: any) => { + if (item.type === 'message' && Array.isArray(item.content)) { + item.content.forEach((contentItem: any) => { + if (contentItem.type === 'input_text' || contentItem.type === 'output_text') { + content += (contentItem.text || ''); + } + }); + } else if (item.type === 'output_text') { + content += (item.text || ''); + } + }); + } + return { + role: msg.role, + content: content.trim(), + }; + }) + .filter((msg) => msg.content); + } + + this.state = { + engine: null, + loading: true, // 初始状态为加载中,等待 engine 初始化完成 + error: null, + chats: initialChats, + isGenerating: false, + messages: initialMessages, + abortController: null, + enableDeepThink: props.defaultEnableDeepThink !== false, + initProgress: null, + }; + + this.foundation = new ClientAIFoundation(this.adapter); + } + + get adapter(): ClientAIAdapter { + return { + ...super.adapter, + setEngine: 
(engine: WebWorkerMLCEngine | null) => this.setState({ engine }), + setLoading: (loading: boolean) => this.setState({ loading }), + setError: (error: string | null) => this.setState({ error }), + setChats: (chats: Message[]) => this.setState({ chats }), + setIsGenerating: (isGenerating: boolean) => this.setState({ isGenerating }), + setAbortController: (controller: AbortController | null) => this.setState({ abortController: controller }), + setMessages: (messages: any[]) => this.setState({ messages }), + setInitProgress: (progress: InitProgressReport | null) => this.setState({ initProgress: progress }), + notifyError: (error: Error) => { + this.props.onError?.(error); + }, + notifyInitProgress: (progress: any) => { + this.props.engineConfig?.initProgressCallback?.(progress); + }, + notifyToolCall: (toolCalls: ToolCall[], rawOutput: string) => { + this.props.onToolCall?.(toolCalls, rawOutput); + }, + }; + } + + /** + * 发送 Tool 执行结果,让 AI 继续对话 + * 可通过 ref 调用此方法 + */ + sendToolResults = async (toolResults: ToolCallResult[]) => { + await this.foundation.sendToolResults(toolResults); + }; + + /** + * 发送文本消息 + * 用于自定义渲染时调用 + */ + sendMessage = async (text: string) => { + if (!text.trim()) return; + const messageContent = { + inputContents: [{ type: 'text', text }] + }; + await this.foundation.handleStreamChat(messageContent as any); + }; + + /** + * 清空消息历史 + */ + clearMessages = () => { + this.setState({ + chats: [], + messages: [], + }); + }; + + /** + * 设置深度思考开关 + */ + setEnableDeepThink = (enable: boolean) => { + this.setState({ enableDeepThink: enable }); + }; + + componentDidMount() { + this.initEngine(); + } + + componentWillUnmount() { + const { modelId } = this.props; + if (modelId) { + this.foundation.destroy(modelId, false); + } + } + + /** + * 初始化引擎 + */ + initEngine = async () => { + const { worker, modelId, engineConfig, chatOpts } = this.props; + + // 检查必需的 props + if (!modelId) { + const err = new Error('ClientAI: modelId is required. 
You can get it from engineConfig, e.g.: ClientAI.Qwen3_1_7B_EngineConfig.appConfig.model_list[0].model_id'); + this.setState({ loading: false, error: err.message }); + this.props.onError?.(err); + return; + } + + if (!engineConfig) { + const err = new Error('ClientAI: engineConfig is required. Please use ClientAI.Qwen3_1_7B_EngineConfig (international) or ClientAI.Qwen3_1_7B_EngineConfigCN (China).'); + this.setState({ loading: false, error: err.message }); + this.props.onError?.(err); + return; + } + + try { + await this.foundation.initEngine({ + modelId, + worker, + engineConfig, + chatOpts, + }); + } catch (error) { + const err = error instanceof Error ? error : new Error('Unknown error'); + this.props.onError?.(err); + } + }; + + /** + * 处理消息发送 + */ + handleMessageSend = async (messageContent: MessageContent) => { + await this.foundation.handleStreamChat(messageContent); + }; + + /** + * 处理停止生成 + */ + handleStopGenerate = () => { + this.foundation.stopGenerate(); + }; + + /** + * 处理聊天消息变化 + */ + handleChatsChange = (chats?: Message[]) => { + if (chats) { + this.setState({ chats }); + } + }; + + + /** + * 处理消息编辑 + */ + handleMessageEdit = (message?: Message) => { + if (!message) return; + + // 标记消息为编辑状态 + this.setState((prevState) => ({ + chats: prevState.chats.map((msg) => + msg.id === message.id ? { ...msg, editing: true } : { ...msg, editing: false } + ), + })); + }; + + /** + * 处理编辑消息发送 + */ + handleEditMessageSend = (messageContent: MessageContent) => { + const { chats } = this.state; + const editingIndex = chats.findIndex((msg) => msg.editing); + + if (editingIndex === -1) return; + + const editedMessage: Message = { + ...chatInputToMessage(messageContent), + id: chats[editingIndex].id, + createdAt: chats[editingIndex].createdAt, + status: 'completed', + }; + + this.setState((prevState) => ({ + chats: prevState.chats.map((msg, index) => + index === editingIndex ? 
editedMessage : { ...msg, editing: false } + ), + })); + }; + + /** + * 处理配置区域变化(深度思考按钮) + */ + handleConfigureChange = (value: Record, changedValue: Record) => { + if ('deepThink' in changedValue) { + this.setState({ enableDeepThink: changedValue.deepThink }); + } + }; + + /** + * 渲染配置区域(深度思考按钮) + */ + renderConfigureArea = () => { + const { enableDeepThink } = this.state; + return ( + + {(locale: Locale["ClientAI"]) => ( + } + field="deepThink" + initValue={enableDeepThink} + > + {locale.deepThink} + + )} + + ); + }; + + /** + * 渲染消息编辑输入框 + */ + renderMessageEdit = (props: MessageContent) => { + const editingMessage = this.state.chats.find((msg) => msg.editing); + if (!editingMessage) return null; + + const { inputProps } = this.props; + const inputEditCls = cls(`${prefixCls}-input-edit`, inputProps?.className); + + return ( + + ); + }; + + /** + * 获取自定义渲染时的 props + */ + getRenderProps = (): ClientAIRenderProps => { + const { chats, loading, error, isGenerating, enableDeepThink } = this.state; + return { + loading, + error, + messages: chats, + isGenerating, + enableDeepThink, + sendMessage: this.sendMessage, + stopGenerate: this.handleStopGenerate, + clearMessages: this.clearMessages, + setEnableDeepThink: this.setEnableDeepThink, + sendToolResults: this.sendToolResults, + }; + }; + + render() { + const { engine, loading, error, chats, isGenerating } = this.state; + const { dialogueProps, inputProps, className, style, systemPrompt, showDeepThinkButton, render, roleConfig: propRoleConfig } = this.props; + + // 如果传入了自定义 render 函数,使用自定义渲染 + if (typeof render === 'function') { + return render(this.getRenderProps()); + } + + // 使用用户传入的 roleConfig 或默认配置 + const roleConfig = propRoleConfig; + + const wrapperCls = cls(prefixCls, className); + const loadingCls = cls(`${prefixCls}-loading`); + const loadingContentCls = cls(`${prefixCls}-loading-content`); + const loadingTextCls = cls(`${prefixCls}-loading-text`); + const errorCls = cls(`${prefixCls}-error`); + const 
contentCls = cls(`${prefixCls}-content`); + const dialogueWrapperCls = cls(`${prefixCls}-dialogue-wrapper`); + const inputWrapperCls = cls(`${prefixCls}-input-wrapper`); + const inputEditCls = cls(`${prefixCls}-input-edit`); + + if (loading) { + const { initProgress } = this.state; + // InitProgressReport 的结构:{ progress?: number, text?: string } + // progress 是 0-1 之间的数字,text 是进度描述文本 + let progressPercent: number | undefined; + + if (initProgress && typeof initProgress === 'object') { + // 检查是否有 progress 字段 + if ('progress' in initProgress && typeof initProgress.progress === 'number') { + progressPercent = Math.round(initProgress.progress * 100); + } + } + + const showProgress = progressPercent !== undefined; + + return ( + + {(locale: Locale["ClientAI"]) => ( +

+ +
+ + {locale.loading} + + {showProgress && ( + + )} +
+
+ )} + + ); + } + + if (error) { + return ( + + {(locale: Locale["ClientAI"]) => ( +
+ + {locale.loadError} + + {error} +
+ )} +
+ ); + } + + if (!engine) { + return null; + } + + return ( + + {(locale: Locale["ClientAI"]) => { + // 默认角色配置(不包含 avatar) + const defaultRoleConfig: RoleConfig = { + user: { + name: locale.roleUser, + }, + assistant: { + name: locale.roleAssistant, + }, + system: { + name: locale.roleSystem, + }, + }; + + // 使用用户传入的 roleConfig 或默认配置 + const finalRoleConfig = roleConfig || defaultRoleConfig; + + return ( +
+
+
+ +
+ + +
+
+ ); + }} +
+ ); + } +} + +export default ClientAI; +export { ClientAI }; + diff --git a/packages/semi-ui/clientAI/interface.ts b/packages/semi-ui/clientAI/interface.ts new file mode 100644 index 0000000000..34572079a7 --- /dev/null +++ b/packages/semi-ui/clientAI/interface.ts @@ -0,0 +1,122 @@ +import type { ClientAIProps as FoundationClientAIProps, ToolCall, ToolCallResult, InitProgressReport } from '@douyinfe/semi-foundation/clientAI/interface'; +import type { AIChatDialogueProps, RoleConfig } from '../aiChatDialogue/interface'; +import type { AIChatInputProps } from '../aiChatInput/interface'; +import type { Message } from '@douyinfe/semi-foundation/aiChatDialogue/foundation'; +import type { ReactNode } from 'react'; + +/** + * 自定义渲染时传入的参数 + */ +export interface ClientAIRenderProps { + /** 是否正在加载模型 */ + loading: boolean; + /** 错误信息 */ + error: string | null; + /** 消息列表 */ + messages: Message[]; + /** 是否正在生成回复 */ + isGenerating: boolean; + /** 是否启用深度思考 */ + enableDeepThink: boolean; + /** 发送消息 */ + sendMessage: (text: string) => Promise; + /** 停止生成 */ + stopGenerate: () => void; + /** 清空消息 */ + clearMessages: () => void; + /** 设置深度思考开关 */ + setEnableDeepThink: (enable: boolean) => void; + /** 发送 Tool 执行结果 */ + sendToolResults: (results: ToolCallResult[]) => Promise +} + +export interface ClientAIProps extends FoundationClientAIProps { + dialogueProps?: Partial; + inputProps?: Partial; + /** + * 角色配置 + * 用于配置用户、助手、系统等角色的名称和头像等信息 + */ + roleConfig?: RoleConfig; + /** + * Tool 调用回调 + * 当 AI 输出包含 tool_call 时触发(仅用于通知,需要手动调用 sendToolResults) + * @deprecated 推荐使用 handleToolCall,组件会自动处理工具调用和结果发送 + * @param toolCalls 解析出的 tool calls 数组 + * @param rawOutput AI 的原始输出 + */ + onToolCall?: (toolCalls: ToolCall[], rawOutput: string) => void; + /** + * Tool 调用处理函数 + * 当 AI 输出包含 tool_call 时,组件会自动调用此函数并等待返回结果,然后自动发送结果继续对话 + * 如果提供了此函数,将优先使用此函数;否则会调用 onToolCall 回调(需要手动调用 sendToolResults) + * @param toolCalls 解析出的 tool calls 数组 + * @param rawOutput AI 的原始输出 + * @returns 返回 Tool 
执行结果数组,组件会自动发送这些结果继续对话 + */ + handleToolCall?: (toolCalls: ToolCall[], rawOutput: string) => Promise | ToolCallResult[]; + /** + * 是否显示深度思考按钮 + * @default false + */ + showDeepThinkButton?: boolean; + /** + * 深度思考默认状态 + * @default true + */ + defaultEnableDeepThink?: boolean; + /** + * 自定义渲染函数 + * 传入此函数后,将完全由用户控制 UI 渲染,内置的 AIChatDialogue 和 AIChatInput 不会被渲染 + */ + render?: (props: ClientAIRenderProps) => ReactNode; + /** + * 用户消息发送前的回调 + * 可以修改用户输入内容,返回的字符串将同时用于显示和发送给AI + * @param userContent 用户输入的原始内容 + * @param messages 完整的消息历史数组 + * @returns 修改后的用户内容 + */ + onUserMessage?: (userContent: string, messages: Message[]) => string; + /** + * AI回复前的回调 + * 可以拦截AI调用并返回自定义回复 + * @param messages 完整的消息历史数组(包含最新的用户消息) + * @returns 非空字符串将作为AI回复,空字符串则正常调用AI + */ + beforeAIInput?: (messages: Message[]) => string | Promise; + /** + * AI回复后的回调 + * 可以修改AI的回复内容 + * @param aiContent AI返回的原始内容 + * @param messages 完整的消息历史数组(包含AI回复) + * @returns 修改后的AI回复内容 + */ + afterAIInput?: (aiContent: string, messages: Message[]) => string | Promise; + /** + * 控制是否流式显示AI回复 + * @default true + * 当为 false 时,等待流式返回完毕后才一次性显示 + */ + stream?: boolean; + /** + * 默认对话消息 + * 用于设置初始的对话历史,组件加载时会显示这些消息 + */ + defaultMessages?: Message[] +} + +export type { ToolCall, ToolCallResult, ClientAIRenderProps }; + +export interface ClientAIState { + engine: any | null; + loading: boolean; + error: string | null; + chats: Message[]; + isGenerating: boolean; + messages: any[]; + abortController: AbortController | null; + enableDeepThink: boolean; + initProgress: InitProgressReport | null +} + diff --git a/packages/semi-ui/feedback/index.tsx b/packages/semi-ui/feedback/index.tsx index e73045ba74..f6529fe5e8 100644 --- a/packages/semi-ui/feedback/index.tsx +++ b/packages/semi-ui/feedback/index.tsx @@ -4,7 +4,7 @@ import cls from 'classnames'; import { cssClasses, strings } from '@douyinfe/semi-foundation/feedback/constants'; import BaseComponent from '../_base/baseComponent'; import { ArrayElement } from '../_base/base'; 
-import { TextArea, RadioGroup, CheckboxGroup, Button, Modal, SideSheet } from '..'; +import { TextArea, RadioGroup, CheckboxGroup, Button, Modal, SideSheet } from '../index'; import { RadioGroupProps } from '../radio/radioGroup'; import { CheckboxGroupProps } from '../checkbox/checkboxGroup'; import { omit, noop } from 'lodash'; diff --git a/packages/semi-ui/index.ts b/packages/semi-ui/index.ts index fa5b508cb0..b89561dcba 100644 --- a/packages/semi-ui/index.ts +++ b/packages/semi-ui/index.ts @@ -134,4 +134,5 @@ export { default as FloatButton } from './floatButton'; export { default as FloatButtonGroup } from './floatButton/floatButtonGroup'; export { default as AIChatDialogue } from './aiChatDialogue'; export { default as AIChatInput, getConfigureItem } from './aiChatInput'; +export { default as ClientAI } from './clientAI'; export { chatCompletionToMessage, streamingChatCompletionToMessage, streamingResponseToMessage, responseToMessage, chatInputToMessage, chatInputToChatCompletion } from '@douyinfe/semi-foundation/aiChatDialogue/dataAdapter'; diff --git a/packages/semi-ui/locale/interface.ts b/packages/semi-ui/locale/interface.ts index 70b5106c29..960a3b23d2 100644 --- a/packages/semi-ui/locale/interface.ts +++ b/packages/semi-ui/locale/interface.ts @@ -226,5 +226,15 @@ export interface Locale { template: string; configure: string; selected: string + }; + ClientAI: { + deepThink: string; + loading: string; + loadingProgress: string; + loadError: string; + inputPlaceholder: string; + roleUser: string; + roleAssistant: string; + roleSystem: string } } \ No newline at end of file diff --git a/packages/semi-ui/locale/source/ar.ts b/packages/semi-ui/locale/source/ar.ts index 74a97a0558..10394352af 100644 --- a/packages/semi-ui/locale/source/ar.ts +++ b/packages/semi-ui/locale/source/ar.ts @@ -226,6 +226,16 @@ const local: Locale = { template: 'القالب', configure: 'التكوين', selected: 'تم اختيار ${count} عناصر', + }, + ClientAI: { + deepThink: 'تفكير عميق', + 
loading: 'جاري التحميل', + loadingProgress: 'تقدم تحميل النموذج', + loadError: 'خطأ في التحميل', + inputPlaceholder: 'أدخل الرسالة...', + roleUser: 'المستخدم', + roleAssistant: 'مساعد الذكاء الاصطناعي', + roleSystem: 'النظام', } }; diff --git a/packages/semi-ui/locale/source/az.ts b/packages/semi-ui/locale/source/az.ts index 22ac5758bb..565479d8ec 100644 --- a/packages/semi-ui/locale/source/az.ts +++ b/packages/semi-ui/locale/source/az.ts @@ -226,6 +226,16 @@ const local: Locale = { template: 'Şablon', configure: 'Konfiqurasiya', selected: 'Seçilmiş ${count} element', + }, + ClientAI: { + deepThink: 'Dərin düşüncə', + loading: 'Yüklənir', + loadingProgress: 'Model yükləmə gedişi', + loadError: 'Yükləmə xətası', + inputPlaceholder: 'Mesaj daxil edin...', + roleUser: 'İstifadəçi', + roleAssistant: 'AI Köməkçi', + roleSystem: 'Sistem', } }; diff --git a/packages/semi-ui/locale/source/bg.ts b/packages/semi-ui/locale/source/bg.ts index 20d641f918..ef2286315c 100644 --- a/packages/semi-ui/locale/source/bg.ts +++ b/packages/semi-ui/locale/source/bg.ts @@ -226,6 +226,16 @@ const local: Locale = { template: 'Шаблон', configure: 'Конфигуриране', selected: 'Избрани ${count} елемента', + }, + ClientAI: { + deepThink: 'Дълбоко мислене', + loading: 'Зареждане', + loadingProgress: 'Прогрес на зареждане на модела', + loadError: 'Грешка при зареждане', + inputPlaceholder: 'Въведете съобщение...', + roleUser: 'Потребител', + roleAssistant: 'AI асистент', + roleSystem: 'Система', } }; diff --git a/packages/semi-ui/locale/source/bn_IN.ts b/packages/semi-ui/locale/source/bn_IN.ts index 4536d3a901..f7bc5cc162 100644 --- a/packages/semi-ui/locale/source/bn_IN.ts +++ b/packages/semi-ui/locale/source/bn_IN.ts @@ -226,6 +226,16 @@ const local: Locale = { template: 'টেমপ্লেট', configure: 'কনফিগার করুন', selected: '${count}টি আইটেম নির্বাচন করা হয়েছে', + }, + ClientAI: { + deepThink: 'গভীর চিন্তা', + loading: 'লোড হচ্ছে', + loadingProgress: 'মডেল লোডিং অগ্রগতি', + loadError: 'লোড ত্রুটি', + 
inputPlaceholder: 'বার্তা লিখুন...', + roleUser: 'ব্যবহারকারী', + roleAssistant: 'AI সহায়ক', + roleSystem: 'সিস্টেম', } }; diff --git a/packages/semi-ui/locale/source/ca.ts b/packages/semi-ui/locale/source/ca.ts index de10335d18..e0a56ea03e 100644 --- a/packages/semi-ui/locale/source/ca.ts +++ b/packages/semi-ui/locale/source/ca.ts @@ -226,6 +226,16 @@ const local: Locale = { template: 'Plantilla', configure: 'Configura', selected: 'S\'han seleccionat ${count} elements', + }, + ClientAI: { + deepThink: 'Pensament profund', + loading: 'Carregant', + loadingProgress: 'Progrés de càrrega del model', + loadError: 'Error de càrrega', + inputPlaceholder: 'Introduïu el missatge...', + roleUser: 'Usuari', + roleAssistant: 'Assistent IA', + roleSystem: 'Sistema', } }; diff --git a/packages/semi-ui/locale/source/ceb_PH.ts b/packages/semi-ui/locale/source/ceb_PH.ts index 7cdcc7109a..1620156174 100644 --- a/packages/semi-ui/locale/source/ceb_PH.ts +++ b/packages/semi-ui/locale/source/ceb_PH.ts @@ -227,6 +227,16 @@ const local: Locale = { template: 'Template', configure: 'I-configure', selected: 'Napili ang ${count} ka mga aytem', + }, + ClientAI: { + deepThink: 'Lawom nga paghunahuna', + loading: 'Nag-load', + loadingProgress: 'Pag-uswag sa pag-load sa modelo', + loadError: 'Sayop sa pag-load', + inputPlaceholder: 'Isulod ang mensahe...', + roleUser: 'Tiggamit', + roleAssistant: 'AI Katabang', + roleSystem: 'Sistema', } }; diff --git a/packages/semi-ui/locale/source/cs_CZ.ts b/packages/semi-ui/locale/source/cs_CZ.ts index cd89127eed..a28fd6bb5f 100644 --- a/packages/semi-ui/locale/source/cs_CZ.ts +++ b/packages/semi-ui/locale/source/cs_CZ.ts @@ -226,6 +226,16 @@ const local: Locale = { template: 'Šablona', configure: 'Nastavení', selected: 'Vybrané položky (${count})', + }, + ClientAI: { + deepThink: 'Hluboké přemýšlení', + loading: 'Načítání', + loadingProgress: 'Průběh načítání modelu', + loadError: 'Chyba načítání', + inputPlaceholder: 'Zadejte zprávu...', + roleUser: 
'Uživatel', + roleAssistant: 'AI asistent', + roleSystem: 'Systém', } }; diff --git a/packages/semi-ui/locale/source/da.ts b/packages/semi-ui/locale/source/da.ts index 29a368a2e5..6798c1e6be 100644 --- a/packages/semi-ui/locale/source/da.ts +++ b/packages/semi-ui/locale/source/da.ts @@ -226,6 +226,16 @@ const local: Locale = { template: 'Skabelon', configure: 'Konfigurer', selected: 'Valgte ${count} elementer', + }, + ClientAI: { + deepThink: 'Dybt tænkning', + loading: 'Indlæser', + loadingProgress: 'Modelloadningsfremgang', + loadError: 'Indlæsningsfejl', + inputPlaceholder: 'Indtast besked...', + roleUser: 'Bruger', + roleAssistant: 'AI-assistent', + roleSystem: 'System', } }; diff --git a/packages/semi-ui/locale/source/de.ts b/packages/semi-ui/locale/source/de.ts index 425dc4befe..7f2c6dc352 100644 --- a/packages/semi-ui/locale/source/de.ts +++ b/packages/semi-ui/locale/source/de.ts @@ -226,6 +226,16 @@ const local: Locale = { template: 'Vorlage', configure: 'Konfigurieren', selected: 'Ausgewählt ${count} Elemente', + }, + ClientAI: { + deepThink: 'Tiefes Nachdenken', + loading: 'Laden', + loadingProgress: 'Modelllade-Fortschritt', + loadError: 'Ladefehler', + inputPlaceholder: 'Nachricht eingeben...', + roleUser: 'Benutzer', + roleAssistant: 'KI-Assistent', + roleSystem: 'System', } }; diff --git a/packages/semi-ui/locale/source/el_GR.ts b/packages/semi-ui/locale/source/el_GR.ts index ee518fe235..5b3db2fd32 100644 --- a/packages/semi-ui/locale/source/el_GR.ts +++ b/packages/semi-ui/locale/source/el_GR.ts @@ -226,6 +226,16 @@ const local: Locale = { template: 'Πρότυπο', configure: 'Ρύθμιση', selected: 'Έγινε επιλογή ${count} στοιχείων', + }, + ClientAI: { + deepThink: 'Βαθιά σκέψη', + loading: 'Φόρτωση', + loadingProgress: 'Πρόοδος φόρτωσης μοντέλου', + loadError: 'Σφάλμα φόρτωσης', + inputPlaceholder: 'Εισάγετε μήνυμα...', + roleUser: 'Χρήστης', + roleAssistant: 'Βοηθός AI', + roleSystem: 'Σύστημα', } }; diff --git a/packages/semi-ui/locale/source/en_GB.ts 
b/packages/semi-ui/locale/source/en_GB.ts index 2ec4748b43..ca18aaeb93 100644 --- a/packages/semi-ui/locale/source/en_GB.ts +++ b/packages/semi-ui/locale/source/en_GB.ts @@ -226,6 +226,16 @@ const local: Locale = { template: 'Template', configure: 'Configure', selected: 'Selected ${count} items', + }, + ClientAI: { + deepThink: 'Deep Think', + loading: 'Loading', + loadingProgress: 'Model loading progress', + loadError: 'Load Error', + inputPlaceholder: 'Enter message...', + roleUser: 'User', + roleAssistant: 'AI Assistant', + roleSystem: 'System', } }; diff --git a/packages/semi-ui/locale/source/en_US.ts b/packages/semi-ui/locale/source/en_US.ts index 9bc17097b2..2044c9cbb9 100644 --- a/packages/semi-ui/locale/source/en_US.ts +++ b/packages/semi-ui/locale/source/en_US.ts @@ -226,6 +226,16 @@ const local: Locale = { template: 'Template', configure: 'Configure', selected: 'Selected ${count} items', + }, + ClientAI: { + deepThink: 'Deep Think', + loading: 'Loading', + loadingProgress: 'Model loading progress', + loadError: 'Load Error', + inputPlaceholder: 'Enter message...', + roleUser: 'User', + roleAssistant: 'AI Assistant', + roleSystem: 'System', } }; diff --git a/packages/semi-ui/locale/source/es.ts b/packages/semi-ui/locale/source/es.ts index 5e475b1ac6..6988996e46 100644 --- a/packages/semi-ui/locale/source/es.ts +++ b/packages/semi-ui/locale/source/es.ts @@ -231,6 +231,16 @@ const locale: Locale = { template: 'Plantilla', configure: 'Configurar', selected: 'Seleccionado ${count} elementos', + }, + ClientAI: { + deepThink: 'Pensamiento profundo', + loading: 'Cargando', + loadingProgress: 'Progreso de carga del modelo', + loadError: 'Error de carga', + inputPlaceholder: 'Ingrese mensaje...', + roleUser: 'Usuario', + roleAssistant: 'Asistente IA', + roleSystem: 'Sistema', } }; diff --git a/packages/semi-ui/locale/source/es_419.ts b/packages/semi-ui/locale/source/es_419.ts index 41c97f23c2..db8ff7dd92 100644 --- a/packages/semi-ui/locale/source/es_419.ts +++ 
b/packages/semi-ui/locale/source/es_419.ts @@ -226,6 +226,16 @@ const local: Locale = { template: 'Plantilla', configure: 'Configurar', selected: '${count} artículos seleccionados', + }, + ClientAI: { + deepThink: 'Pensamiento profundo', + loading: 'Cargando', + loadingProgress: 'Progreso de carga del modelo', + loadError: 'Error de carga', + inputPlaceholder: 'Ingrese mensaje...', + roleUser: 'Usuario', + roleAssistant: 'Asistente IA', + roleSystem: 'Sistema', } }; diff --git a/packages/semi-ui/locale/source/et.ts b/packages/semi-ui/locale/source/et.ts index deeda30179..8c32f10c58 100644 --- a/packages/semi-ui/locale/source/et.ts +++ b/packages/semi-ui/locale/source/et.ts @@ -226,6 +226,16 @@ const local: Locale = { template: 'Mall', configure: 'Konfigureeri', selected: 'Valitud ${count} elementi', + }, + ClientAI: { + deepThink: 'Sügav mõtlemine', + loading: 'Laadimine', + loadingProgress: 'Mudeli laadimise edenemine', + loadError: 'Laadimise viga', + inputPlaceholder: 'Sisestage sõnum...', + roleUser: 'Kasutaja', + roleAssistant: 'AI abiline', + roleSystem: 'Süsteem', } }; diff --git a/packages/semi-ui/locale/source/fa_IR.ts b/packages/semi-ui/locale/source/fa_IR.ts index 0658162cde..7323f721af 100644 --- a/packages/semi-ui/locale/source/fa_IR.ts +++ b/packages/semi-ui/locale/source/fa_IR.ts @@ -226,6 +226,16 @@ const local: Locale = { template: 'قالب', configure: 'پیکربندی', selected: 'مواردی {count} دلاری انتخاب‌شده', + }, + ClientAI: { + deepThink: 'تفکر عمیق', + loading: 'در حال بارگذاری', + loadingProgress: 'پیشرفت بارگذاری مدل', + loadError: 'خطای بارگذاری', + inputPlaceholder: 'پیام را وارد کنید...', + roleUser: 'کاربر', + roleAssistant: 'دستیار هوش مصنوعی', + roleSystem: 'سیستم', } }; diff --git a/packages/semi-ui/locale/source/fi_FI.ts b/packages/semi-ui/locale/source/fi_FI.ts index dfbaee4eac..b0e5ca1575 100644 --- a/packages/semi-ui/locale/source/fi_FI.ts +++ b/packages/semi-ui/locale/source/fi_FI.ts @@ -226,6 +226,16 @@ const local: Locale = { 
template: 'Malli', configure: 'Määritä', selected: 'Valitut ${count} kohdetta', + }, + ClientAI: { + deepThink: 'Syvä ajattelu', + loading: 'Ladataan', + loadingProgress: 'Mallin latausprosessi', + loadError: 'Latausvirhe', + inputPlaceholder: 'Kirjoita viesti...', + roleUser: 'Käyttäjä', + roleAssistant: 'AI-avustaja', + roleSystem: 'Järjestelmä', } }; diff --git a/packages/semi-ui/locale/source/fil_PH.ts b/packages/semi-ui/locale/source/fil_PH.ts index d22d606783..419309eaad 100644 --- a/packages/semi-ui/locale/source/fil_PH.ts +++ b/packages/semi-ui/locale/source/fil_PH.ts @@ -226,6 +226,16 @@ const local: Locale = { template: 'Template', configure: 'I-configure', selected: 'Napili ang ${count} (na) item', + }, + ClientAI: { + deepThink: 'Malalim na pag-iisip', + loading: 'Naglo-load', + loadingProgress: 'Progreso ng pag-load ng modelo', + loadError: 'Error sa pag-load', + inputPlaceholder: 'Ilagay ang mensahe...', + roleUser: 'Gumagamit', + roleAssistant: 'AI Assistant', + roleSystem: 'Sistema', } }; diff --git a/packages/semi-ui/locale/source/fr.ts b/packages/semi-ui/locale/source/fr.ts index a0e6712547..4b6cbd7af0 100644 --- a/packages/semi-ui/locale/source/fr.ts +++ b/packages/semi-ui/locale/source/fr.ts @@ -226,6 +226,16 @@ const local: Locale = { template: 'Modèle', configure: 'Configurer', selected: '${count} éléments sélectionnés', + }, + ClientAI: { + deepThink: 'Réflexion approfondie', + loading: 'Chargement', + loadingProgress: 'Progression du chargement du modèle', + loadError: 'Erreur de chargement', + inputPlaceholder: 'Entrez un message...', + roleUser: 'Utilisateur', + roleAssistant: 'Assistant IA', + roleSystem: 'Système', } }; diff --git a/packages/semi-ui/locale/source/fr_CA.ts b/packages/semi-ui/locale/source/fr_CA.ts index c08e849f48..9799e2763e 100644 --- a/packages/semi-ui/locale/source/fr_CA.ts +++ b/packages/semi-ui/locale/source/fr_CA.ts @@ -226,6 +226,16 @@ const local: Locale = { template: 'Modèle', configure: 'Configurer', selected: 
'${count} éléments sélectionnés', + }, + ClientAI: { + deepThink: 'Réflexion approfondie', + loading: 'Chargement', + loadingProgress: 'Progression du chargement du modèle', + loadError: 'Erreur de chargement', + inputPlaceholder: 'Entrez un message...', + roleUser: 'Utilisateur', + roleAssistant: 'Assistant IA', + roleSystem: 'Système', } }; diff --git a/packages/semi-ui/locale/source/ga.ts b/packages/semi-ui/locale/source/ga.ts index 13cd2ce590..f867a4c096 100644 --- a/packages/semi-ui/locale/source/ga.ts +++ b/packages/semi-ui/locale/source/ga.ts @@ -226,6 +226,16 @@ const local: Locale = { template: 'Teimpléad', configure: 'Cumraigh', selected: 'Roghnaíodh ${count} mír', + }, + ClientAI: { + deepThink: 'Smaoinimh dhomhain', + loading: 'Á lódáil', + loadingProgress: 'Dul chun cinn lódáil an tsamhail', + loadError: 'Earráid lódála', + inputPlaceholder: 'Iontráil teachtaireacht...', + roleUser: 'Úsáideoir', + roleAssistant: 'Cúntóir AI', + roleSystem: 'Córas', } }; diff --git a/packages/semi-ui/locale/source/he_IL.ts b/packages/semi-ui/locale/source/he_IL.ts index eac74575e6..3e620de906 100644 --- a/packages/semi-ui/locale/source/he_IL.ts +++ b/packages/semi-ui/locale/source/he_IL.ts @@ -226,6 +226,16 @@ const local: Locale = { template: 'תבנית', configure: 'הגדרת תצורה', selected: 'נבחרו ${count} פריטים', + }, + ClientAI: { + deepThink: 'חשיבה עמוקה', + loading: 'טוען', + loadingProgress: 'התקדמות טעינת המודל', + loadError: 'שגיאת טעינה', + inputPlaceholder: 'הזן הודעה...', + roleUser: 'משתמש', + roleAssistant: 'עוזר AI', + roleSystem: 'מערכת', } }; diff --git a/packages/semi-ui/locale/source/hi_IN.ts b/packages/semi-ui/locale/source/hi_IN.ts index 4b763de365..743a826dec 100644 --- a/packages/semi-ui/locale/source/hi_IN.ts +++ b/packages/semi-ui/locale/source/hi_IN.ts @@ -226,6 +226,16 @@ const local: Locale = { template: 'टेंप्लेट', configure: 'कॉन्फ़िगर करें', selected: '${count} आइटम चुने गए', + }, + ClientAI: { + deepThink: 'गहरी सोच', + loading: 'लोड हो रहा 
है', + loadingProgress: 'मॉडल लोडिंग प्रगति', + loadError: 'लोड त्रुटि', + inputPlaceholder: 'संदेश दर्ज करें...', + roleUser: 'उपयोगकर्ता', + roleAssistant: 'AI सहायक', + roleSystem: 'सिस्टम', } }; diff --git a/packages/semi-ui/locale/source/hr.ts b/packages/semi-ui/locale/source/hr.ts index 0062ced277..b3f9644a72 100644 --- a/packages/semi-ui/locale/source/hr.ts +++ b/packages/semi-ui/locale/source/hr.ts @@ -226,6 +226,16 @@ const local: Locale = { template: 'Predložak', configure: 'Konfiguriraj', selected: 'Odabrane stavke: ${count}', + }, + ClientAI: { + deepThink: 'Duboko razmišljanje', + loading: 'Učitavanje', + loadingProgress: 'Napredak učitavanja modela', + loadError: 'Greška učitavanja', + inputPlaceholder: 'Unesite poruku...', + roleUser: 'Korisnik', + roleAssistant: 'AI asistent', + roleSystem: 'Sustav', } }; diff --git a/packages/semi-ui/locale/source/hu_HU.ts b/packages/semi-ui/locale/source/hu_HU.ts index 18e60cbfaf..8756ba79d9 100644 --- a/packages/semi-ui/locale/source/hu_HU.ts +++ b/packages/semi-ui/locale/source/hu_HU.ts @@ -226,6 +226,16 @@ const local: Locale = { template: 'Sablon', configure: 'Beállítás', selected: '${count} elem kiválasztva', + }, + ClientAI: { + deepThink: 'Mély gondolkodás', + loading: 'Betöltés', + loadingProgress: 'Modell betöltési folyamat', + loadError: 'Betöltési hiba', + inputPlaceholder: 'Írja be az üzenetet...', + roleUser: 'Felhasználó', + roleAssistant: 'AI asszisztens', + roleSystem: 'Rendszer', } }; diff --git a/packages/semi-ui/locale/source/id_ID.ts b/packages/semi-ui/locale/source/id_ID.ts index db0a6a47af..8af0e9e915 100644 --- a/packages/semi-ui/locale/source/id_ID.ts +++ b/packages/semi-ui/locale/source/id_ID.ts @@ -226,6 +226,16 @@ const local: Locale = { template: 'Templat', configure: 'Konfigurasikan', selected: 'Item yang dipilih ${count}', + }, + ClientAI: { + deepThink: 'Berpikir mendalam', + loading: 'Memuat', + loadingProgress: 'Kemajuan pemuatan model', + loadError: 'Kesalahan memuat', + 
inputPlaceholder: 'Masukkan pesan...', + roleUser: 'Pengguna', + roleAssistant: 'Asisten AI', + roleSystem: 'Sistem', } }; diff --git a/packages/semi-ui/locale/source/is.ts b/packages/semi-ui/locale/source/is.ts index 9e61a5edf6..39a88b0de4 100644 --- a/packages/semi-ui/locale/source/is.ts +++ b/packages/semi-ui/locale/source/is.ts @@ -226,6 +226,16 @@ const local: Locale = { template: 'Sniðmát', configure: 'Stilla', selected: '${count} atriði valin', + }, + ClientAI: { + deepThink: 'Djúp hugsun', + loading: 'Hleður', + loadingProgress: 'Framvinda hleðslu líkans', + loadError: 'Hleðsluvilla', + inputPlaceholder: 'Sláðu inn skilaboð...', + roleUser: 'Notandi', + roleAssistant: 'AI aðstoðarmaður', + roleSystem: 'Kerfi', } }; diff --git a/packages/semi-ui/locale/source/it.ts b/packages/semi-ui/locale/source/it.ts index f62c595170..9140cb5b18 100644 --- a/packages/semi-ui/locale/source/it.ts +++ b/packages/semi-ui/locale/source/it.ts @@ -226,6 +226,16 @@ const local: Locale = { template: 'Stampino', configure: 'Configurazione', selected: '${count} elementi selezionati', + }, + ClientAI: { + deepThink: 'Pensiero profondo', + loading: 'Caricamento', + loadingProgress: 'Progresso di caricamento del modello', + loadError: 'Errore di caricamento', + inputPlaceholder: 'Inserisci messaggio...', + roleUser: 'Utente', + roleAssistant: 'Assistente IA', + roleSystem: 'Sistema', } }; diff --git a/packages/semi-ui/locale/source/ja_JP.ts b/packages/semi-ui/locale/source/ja_JP.ts index d64c791940..b64f92f6f8 100644 --- a/packages/semi-ui/locale/source/ja_JP.ts +++ b/packages/semi-ui/locale/source/ja_JP.ts @@ -227,6 +227,16 @@ const local: Locale = { template: 'テンプレート', configure: '設定', selected: '選択された ${count} 個', + }, + ClientAI: { + deepThink: '深い思考', + loading: '読み込み中', + loadingProgress: 'モデル読み込み進捗', + loadError: '読み込みエラー', + inputPlaceholder: 'メッセージを入力...', + roleUser: 'ユーザー', + roleAssistant: 'AI アシスタント', + roleSystem: 'システム', } }; diff --git 
a/packages/semi-ui/locale/source/jv_ID.ts b/packages/semi-ui/locale/source/jv_ID.ts index 83bf5b6b4b..e5005c2c8c 100644 --- a/packages/semi-ui/locale/source/jv_ID.ts +++ b/packages/semi-ui/locale/source/jv_ID.ts @@ -226,6 +226,16 @@ const local: Locale = { template: 'Templat', configure: 'Konfigurasi', selected: 'Item ${count} sing dipilih', + }, + ClientAI: { + deepThink: 'Pikiran jero', + loading: 'Muat', + loadingProgress: 'Kemajuan muat model', + loadError: 'Kesalahan muat', + inputPlaceholder: 'Lebokake pesen...', + roleUser: 'Pangguna', + roleAssistant: 'Asisten AI', + roleSystem: 'Sistem', } }; diff --git a/packages/semi-ui/locale/source/kk.ts b/packages/semi-ui/locale/source/kk.ts index 08b79e3859..a961dda5cc 100644 --- a/packages/semi-ui/locale/source/kk.ts +++ b/packages/semi-ui/locale/source/kk.ts @@ -226,6 +226,16 @@ const local: Locale = { template: 'Үлгі', configure: 'Теңшеу', selected: '${count} элемент таңдалды', + }, + ClientAI: { + deepThink: 'Терең ойлау', + loading: 'Жүктелуде', + loadingProgress: 'Модельді жүктеу барысы', + loadError: 'Жүктеу қатесі', + inputPlaceholder: 'Хабарламаны енгізіңіз...', + roleUser: 'Пайдаланушы', + roleAssistant: 'AI көмекшісі', + roleSystem: 'Жүйе', } }; diff --git a/packages/semi-ui/locale/source/km_KH.ts b/packages/semi-ui/locale/source/km_KH.ts index 7d263a31f7..8116659cc2 100644 --- a/packages/semi-ui/locale/source/km_KH.ts +++ b/packages/semi-ui/locale/source/km_KH.ts @@ -226,6 +226,16 @@ const local: Locale = { template: 'ពុម្ពគំរូ', configure: 'កំណត់រចនាសម្ព័ន្ធ', selected: 'បានជ្រើសរើសធាតុ ${count}', + }, + ClientAI: { + deepThink: 'ការគិតស៊ីជម្រៅ', + loading: 'កំពុងផ្ទុក', + loadingProgress: 'ដំណើរការផ្ទុកគំរូ', + loadError: 'កំហុសផ្ទុក', + inputPlaceholder: 'បញ្ចូលសារ...', + roleUser: 'អ្នកប្រើប្រាស់', + roleAssistant: 'ជំនួយការ AI', + roleSystem: 'ប្រព័ន្ធ', } }; diff --git a/packages/semi-ui/locale/source/ko_KR.ts b/packages/semi-ui/locale/source/ko_KR.ts index a73155f1f8..86cda5623e 100644 --- 
a/packages/semi-ui/locale/source/ko_KR.ts +++ b/packages/semi-ui/locale/source/ko_KR.ts @@ -227,6 +227,16 @@ const local: Locale = { template: '템플릿', configure: '설정', selected: '선택된 ${count} 개', + }, + ClientAI: { + deepThink: '깊은 사고', + loading: '로딩 중', + loadingProgress: '모델 로딩 진행률', + loadError: '로딩 오류', + inputPlaceholder: '메시지 입력...', + roleUser: '사용자', + roleAssistant: 'AI 어시스턴트', + roleSystem: '시스템', } }; diff --git a/packages/semi-ui/locale/source/lt.ts b/packages/semi-ui/locale/source/lt.ts index 6690233497..04de933ece 100644 --- a/packages/semi-ui/locale/source/lt.ts +++ b/packages/semi-ui/locale/source/lt.ts @@ -226,6 +226,16 @@ const local: Locale = { template: 'Šablonas', configure: 'Konfigūruoti', selected: 'Pasirinkta ${count} elem.', + }, + ClientAI: { + deepThink: 'Gilus mąstymas', + loading: 'Įkeliama', + loadingProgress: 'Modelio įkėlimo pažanga', + loadError: 'Įkėlimo klaida', + inputPlaceholder: 'Įveskite žinutę...', + roleUser: 'Naudotojas', + roleAssistant: 'AI asistentas', + roleSystem: 'Sistema', } }; diff --git a/packages/semi-ui/locale/source/lv.ts b/packages/semi-ui/locale/source/lv.ts index 3f9a3ca707..3c64ea0a79 100644 --- a/packages/semi-ui/locale/source/lv.ts +++ b/packages/semi-ui/locale/source/lv.ts @@ -226,6 +226,16 @@ const local: Locale = { template: 'Veidne', configure: 'Konfigurēt', selected: 'Atlasīto vienumu skaits: ${count}', + }, + ClientAI: { + deepThink: 'Dziļa domāšana', + loading: 'Ielādē', + loadingProgress: 'Modeļa ielādes progress', + loadError: 'Ielādes kļūda', + inputPlaceholder: 'Ievadiet ziņojumu...', + roleUser: 'Lietotājs', + roleAssistant: 'AI asistents', + roleSystem: 'Sistēma', } }; diff --git a/packages/semi-ui/locale/source/ms_MY.ts b/packages/semi-ui/locale/source/ms_MY.ts index 6edf304c23..a18bbf19be 100644 --- a/packages/semi-ui/locale/source/ms_MY.ts +++ b/packages/semi-ui/locale/source/ms_MY.ts @@ -226,6 +226,16 @@ const local: Locale = { template: 'Stensil', configure: 'Konfigurasi', selected: 
'${count} item dipilih', + }, + ClientAI: { + deepThink: 'Berfikir mendalam', + loading: 'Memuatkan', + loadingProgress: 'Kemajuan muat turun model', + loadError: 'Ralat muat turun', + inputPlaceholder: 'Masukkan mesej...', + roleUser: 'Pengguna', + roleAssistant: 'Pembantu AI', + roleSystem: 'Sistem', } }; diff --git a/packages/semi-ui/locale/source/my_MM.ts b/packages/semi-ui/locale/source/my_MM.ts index 718d421704..ebd7d2417f 100644 --- a/packages/semi-ui/locale/source/my_MM.ts +++ b/packages/semi-ui/locale/source/my_MM.ts @@ -226,6 +226,16 @@ const local: Locale = { template: 'နမူနာပုံစံ', configure: 'စီစဉ်သတ်မှတ်ရန်', selected: 'ပစ္စည်း ${count} ခု ရွေးချယ်ထားသည်', + }, + ClientAI: { + deepThink: 'နက်နဲသောစဉ်းစားခြင်း', + loading: 'ဖွင့်နေသည်', + loadingProgress: 'မော်ဒယ်တင်ခြင်းတိုးတက်မှု', + loadError: 'ဖွင့်မှုအမှား', + inputPlaceholder: 'သတင်းစာရိုက်ပါ...', + roleUser: 'အသုံးပြုသူ', + roleAssistant: 'AI လက်ထောက်', + roleSystem: 'စနစ်', } }; diff --git a/packages/semi-ui/locale/source/nb.ts b/packages/semi-ui/locale/source/nb.ts index a54716e642..8c1844f68b 100644 --- a/packages/semi-ui/locale/source/nb.ts +++ b/packages/semi-ui/locale/source/nb.ts @@ -226,6 +226,16 @@ const local: Locale = { template: 'Mal', configure: 'Konfigurer', selected: '${count} elementer er valgt', + }, + ClientAI: { + deepThink: 'Dyp tenkning', + loading: 'Laster', + loadingProgress: 'Modelllastingsfremgang', + loadError: 'Lastingsfeil', + inputPlaceholder: 'Skriv inn melding...', + roleUser: 'Bruker', + roleAssistant: 'AI-assistent', + roleSystem: 'System', } }; diff --git a/packages/semi-ui/locale/source/nl_NL.ts b/packages/semi-ui/locale/source/nl_NL.ts index dc9debdd28..d76d100379 100644 --- a/packages/semi-ui/locale/source/nl_NL.ts +++ b/packages/semi-ui/locale/source/nl_NL.ts @@ -233,6 +233,16 @@ const local: Locale = { template: 'Stencil', configure: 'Configuratie', selected: '${count} items geselecteerd', + }, + ClientAI: { + deepThink: 'Diep nadenken', + loading: 'Laden', 
+ loadingProgress: 'Voortgang model laden', + loadError: 'Laadfout', + inputPlaceholder: 'Voer bericht in...', + roleUser: 'Gebruiker', + roleAssistant: 'AI-assistent', + roleSystem: 'Systeem', } }; diff --git a/packages/semi-ui/locale/source/pl_PL.ts b/packages/semi-ui/locale/source/pl_PL.ts index 16f61b9a27..eae91c1b56 100644 --- a/packages/semi-ui/locale/source/pl_PL.ts +++ b/packages/semi-ui/locale/source/pl_PL.ts @@ -234,6 +234,16 @@ const local: Locale = { template: 'Szablon', configure: 'Konfiguracja', selected: 'Wybrano ${count} elementów', + }, + ClientAI: { + deepThink: 'Głębokie myślenie', + loading: 'Ładowanie', + loadingProgress: 'Postęp ładowania modelu', + loadError: 'Błąd ładowania', + inputPlaceholder: 'Wpisz wiadomość...', + roleUser: 'Użytkownik', + roleAssistant: 'Asystent AI', + roleSystem: 'System', } }; diff --git a/packages/semi-ui/locale/source/pt.ts b/packages/semi-ui/locale/source/pt.ts index 2cf5e2bd2b..e706998714 100644 --- a/packages/semi-ui/locale/source/pt.ts +++ b/packages/semi-ui/locale/source/pt.ts @@ -226,6 +226,16 @@ const local: Locale = { template: 'Modelo', configure: 'Configurar', selected: '${count} itens selecionados', + }, + ClientAI: { + deepThink: 'Pensamento profundo', + loading: 'A carregar', + loadingProgress: 'Progresso do carregamento do modelo', + loadError: 'Erro ao carregar', + inputPlaceholder: 'Introduza uma mensagem...', + roleUser: 'Utilizador', + roleAssistant: 'Assistente IA', + roleSystem: 'Sistema', } }; diff --git a/packages/semi-ui/locale/source/pt_BR.ts b/packages/semi-ui/locale/source/pt_BR.ts index 8db74b9d8a..9f5be6c711 100644 --- a/packages/semi-ui/locale/source/pt_BR.ts +++ b/packages/semi-ui/locale/source/pt_BR.ts @@ -234,6 +234,16 @@ const local: Locale = { template: 'Estêncil', configure: 'Configuração', selected: '${count} itens selecionados', + }, + ClientAI: { + deepThink: 'Pensamento profundo', + loading: 'Carregando', + loadingProgress: 'Progresso de carregamento do modelo', + loadError: 
'Erro ao carregar', + inputPlaceholder: 'Digite uma mensagem...', + roleUser: 'Usuário', + roleAssistant: 'Assistente IA', + roleSystem: 'Sistema', } }; diff --git a/packages/semi-ui/locale/source/ro.ts b/packages/semi-ui/locale/source/ro.ts index 8af302c68d..b5195bac1b 100644 --- a/packages/semi-ui/locale/source/ro.ts +++ b/packages/semi-ui/locale/source/ro.ts @@ -226,6 +226,16 @@ const local: Locale = { template: 'Sablon', configure: 'Configurare', selected: '${count} articole selectate', + }, + ClientAI: { + deepThink: 'Gândire profundă', + loading: 'Se încarcă', + loadingProgress: 'Progresul încărcării modelului', + loadError: 'Eroare de încărcare', + inputPlaceholder: 'Introduceți mesajul...', + roleUser: 'Utilizator', + roleAssistant: 'Asistent AI', + roleSystem: 'Sistem', } }; diff --git a/packages/semi-ui/locale/source/ru_RU.ts b/packages/semi-ui/locale/source/ru_RU.ts index 0122136385..3764b6ede9 100644 --- a/packages/semi-ui/locale/source/ru_RU.ts +++ b/packages/semi-ui/locale/source/ru_RU.ts @@ -229,6 +229,16 @@ const local: Locale = { template: 'Tрафарет', configure: 'Конфигурация', selected: 'Выбрано ${count} элементов', + }, + ClientAI: { + deepThink: 'Глубокое размышление', + loading: 'Загрузка', + loadingProgress: 'Прогресс загрузки модели', + loadError: 'Ошибка загрузки', + inputPlaceholder: 'Введите сообщение...', + roleUser: 'Пользователь', + roleAssistant: 'ИИ-ассистент', + roleSystem: 'Система', } }; diff --git a/packages/semi-ui/locale/source/sk.ts b/packages/semi-ui/locale/source/sk.ts index ff3e4a85d8..9cfa508205 100644 --- a/packages/semi-ui/locale/source/sk.ts +++ b/packages/semi-ui/locale/source/sk.ts @@ -226,6 +226,16 @@ const local: Locale = { template: 'Šablóna', configure: 'Nastaviť', selected: 'Vybrané položky (${count})', + }, + ClientAI: { + deepThink: 'Hlboké myslenie', + loading: 'Načítava sa', + loadingProgress: 'Priebeh načítavania modelu', + loadError: 'Chyba načítavania', + inputPlaceholder: 'Zadajte správu...', + roleUser: 
'Používateľ', + roleAssistant: 'AI asistent', + roleSystem: 'Systém', } }; diff --git a/packages/semi-ui/locale/source/sl.ts b/packages/semi-ui/locale/source/sl.ts index 320b9eabf0..3cc68d23d6 100644 --- a/packages/semi-ui/locale/source/sl.ts +++ b/packages/semi-ui/locale/source/sl.ts @@ -226,6 +226,16 @@ const local: Locale = { template: 'Predloga', configure: 'Nastavi', selected: 'Izbranih elementov: ${count}', + }, + ClientAI: { + deepThink: 'Globoko razmišljanje', + loading: 'Nalaganje', + loadingProgress: 'Napredek nalaganja modela', + loadError: 'Napaka nalaganja', + inputPlaceholder: 'Vnesite sporočilo...', + roleUser: 'Uporabnik', + roleAssistant: 'AI asistent', + roleSystem: 'Sistem', } }; diff --git a/packages/semi-ui/locale/source/sq.ts b/packages/semi-ui/locale/source/sq.ts index 57ba20bed2..e16a805377 100644 --- a/packages/semi-ui/locale/source/sq.ts +++ b/packages/semi-ui/locale/source/sq.ts @@ -226,6 +226,16 @@ const local: Locale = { template: 'Shabllon', configure: 'Konfiguro', selected: 'Janë përzgjedhur ${count} artikuj', + }, + ClientAI: { + deepThink: 'Mendim i thellë', + loading: 'Duke u ngarkuar', + loadingProgress: 'Progresi i ngarkimit të modelit', + loadError: 'Gabim ngarkimi', + inputPlaceholder: 'Shkruani mesazhin...', + roleUser: 'Përdoruesi', + roleAssistant: 'Asistent AI', + roleSystem: 'Sistemi', } }; diff --git a/packages/semi-ui/locale/source/sv_SE.ts b/packages/semi-ui/locale/source/sv_SE.ts index c1e23cfb72..8b13f7e1e7 100644 --- a/packages/semi-ui/locale/source/sv_SE.ts +++ b/packages/semi-ui/locale/source/sv_SE.ts @@ -231,6 +231,16 @@ const local: Locale = { template: 'Stencil', configure: 'Konfiguration', selected: '${count} objekt valda', + }, + ClientAI: { + deepThink: 'Djup tänkande', + loading: 'Laddar', + loadingProgress: 'Modellladdningsframsteg', + loadError: 'Laddningsfel', + inputPlaceholder: 'Ange meddelande...', + roleUser: 'Användare', + roleAssistant: 'AI-assistent', + roleSystem: 'System', } }; diff --git 
a/packages/semi-ui/locale/source/sw.ts b/packages/semi-ui/locale/source/sw.ts index 63e9c96d06..1075d2b95d 100644 --- a/packages/semi-ui/locale/source/sw.ts +++ b/packages/semi-ui/locale/source/sw.ts @@ -226,6 +226,16 @@ const local: Locale = { template: 'Kiolezo', configure: 'Weka mipangilio', selected: 'Umechagua vipengee ${count}', + }, + ClientAI: { + deepThink: 'Fikiri kwa kina', + loading: 'Inapakia', + loadingProgress: 'Maendeleo ya kupakia muundo', + loadError: 'Hitilafu ya kupakia', + inputPlaceholder: 'Ingiza ujumbe...', + roleUser: 'Mtumiaji', + roleAssistant: 'Msaidizi wa AI', + roleSystem: 'Mfumo', } }; diff --git a/packages/semi-ui/locale/source/th_TH.ts b/packages/semi-ui/locale/source/th_TH.ts index 5ec07670d6..076b398823 100644 --- a/packages/semi-ui/locale/source/th_TH.ts +++ b/packages/semi-ui/locale/source/th_TH.ts @@ -230,6 +230,16 @@ const local: Locale = { template: 'ลายฉลุ', configure: 'การกำหนดค่า', selected: '${count} รายการที่เลือก', + }, + ClientAI: { + deepThink: 'การคิดอย่างลึกซึ้ง', + loading: 'กำลังโหลด', + loadingProgress: 'ความคืบหน้าการโหลดโมเดล', + loadError: 'ข้อผิดพลาดในการโหลด', + inputPlaceholder: 'ใส่ข้อความ...', + roleUser: 'ผู้ใช้', + roleAssistant: 'ผู้ช่วย AI', + roleSystem: 'ระบบ', } }; diff --git a/packages/semi-ui/locale/source/tr_TR.ts b/packages/semi-ui/locale/source/tr_TR.ts index 518f8b59d4..31c5e42884 100644 --- a/packages/semi-ui/locale/source/tr_TR.ts +++ b/packages/semi-ui/locale/source/tr_TR.ts @@ -227,6 +227,16 @@ const local: Locale = { template: 'Şablon', configure: 'Yapılandırma', selected: '${count} öğe seçildi', + }, + ClientAI: { + deepThink: 'Derin düşünme', + loading: 'Yükleniyor', + loadingProgress: 'Model yükleme ilerlemesi', + loadError: 'Yükleme hatası', + inputPlaceholder: 'Mesaj girin...', + roleUser: 'Kullanıcı', + roleAssistant: 'AI Asistanı', + roleSystem: 'Sistem', } }; diff --git a/packages/semi-ui/locale/source/uk_UA.ts b/packages/semi-ui/locale/source/uk_UA.ts index 
0a30f309d1..674d9b0c97 100644 --- a/packages/semi-ui/locale/source/uk_UA.ts +++ b/packages/semi-ui/locale/source/uk_UA.ts @@ -226,6 +226,16 @@ const local: Locale = { template: 'Шаблон', configure: 'Налаштувати', selected: 'Вибрано елементів: ${count}', + }, + ClientAI: { + deepThink: 'Глибоке мислення', + loading: 'Завантаження', + loadingProgress: 'Прогрес завантаження моделі', + loadError: 'Помилка завантаження', + inputPlaceholder: 'Введіть повідомлення...', + roleUser: 'Користувач', + roleAssistant: 'Помічник ШІ', + roleSystem: 'Система', } }; diff --git a/packages/semi-ui/locale/source/ur.ts b/packages/semi-ui/locale/source/ur.ts index 6b28ac9c09..6acca431d4 100644 --- a/packages/semi-ui/locale/source/ur.ts +++ b/packages/semi-ui/locale/source/ur.ts @@ -226,6 +226,16 @@ const local: Locale = { template: 'ٹیمپلیٹ', configure: 'تشکیل کریں', selected: '${count} آئٹمز منتخب کی گئیں', + }, + ClientAI: { + deepThink: 'گہری سوچ', + loading: 'لوڈ ہو رہا ہے', + loadingProgress: 'ماڈل لوڈنگ کی پیشرفت', + loadError: 'لوڈنگ میں خرابی', + inputPlaceholder: 'پیغام درج کریں...', + roleUser: 'صارف', + roleAssistant: 'AI اسسٹنٹ', + roleSystem: 'نظام', } }; diff --git a/packages/semi-ui/locale/source/uz.ts b/packages/semi-ui/locale/source/uz.ts index b1f11d6164..4a2a73db7b 100644 --- a/packages/semi-ui/locale/source/uz.ts +++ b/packages/semi-ui/locale/source/uz.ts @@ -226,6 +226,16 @@ const local: Locale = { template: 'Andoza', configure: 'Sozlash', selected: '${count} ta element tanlangan', + }, + ClientAI: { + deepThink: 'Chuqur fikrlash', + loading: 'Yuklanmoqda', + loadingProgress: 'Model yuklanish jarayoni', + loadError: 'Yuklash xatosi', + inputPlaceholder: 'Xabar kiriting...', + roleUser: 'Foydalanuvchi', + roleAssistant: 'AI yordamchisi', + roleSystem: 'Tizim', } }; diff --git a/packages/semi-ui/locale/source/vi_VN.ts b/packages/semi-ui/locale/source/vi_VN.ts index b749b32a22..3db8e25997 100644 --- a/packages/semi-ui/locale/source/vi_VN.ts +++ 
b/packages/semi-ui/locale/source/vi_VN.ts @@ -229,6 +229,16 @@ const local: Locale = { template: 'Bản mẫu', configure: 'Cấu hình', selected: 'Đã chọn ${count} mục', + }, + ClientAI: { + deepThink: 'Suy nghĩ sâu sắc', + loading: 'Đang tải', + loadingProgress: 'Tiến trình tải mô hình', + loadError: 'Lỗi tải', + inputPlaceholder: 'Nhập tin nhắn...', + roleUser: 'Người dùng', + roleAssistant: 'Trợ lý AI', + roleSystem: 'Hệ thống', } }; diff --git a/packages/semi-ui/locale/source/zh_CN.ts b/packages/semi-ui/locale/source/zh_CN.ts index 9afe3060b4..a6ffaf6153 100644 --- a/packages/semi-ui/locale/source/zh_CN.ts +++ b/packages/semi-ui/locale/source/zh_CN.ts @@ -227,6 +227,16 @@ const local: Locale = { template: '模板', configure: '配置', selected: '已选 ${count} 个', + }, + ClientAI: { + deepThink: '深度思考', + loading: '加载中', + loadingProgress: '模型加载进度', + loadError: '加载错误', + inputPlaceholder: '输入消息...', + roleUser: '用户', + roleAssistant: 'AI 助手', + roleSystem: '系统', } }; diff --git a/packages/semi-ui/locale/source/zh_TW.ts b/packages/semi-ui/locale/source/zh_TW.ts index b608c42a44..ccbb59995d 100644 --- a/packages/semi-ui/locale/source/zh_TW.ts +++ b/packages/semi-ui/locale/source/zh_TW.ts @@ -227,6 +227,16 @@ const local: Locale = { template: '模板', configure: '配置', selected: '已選 ${count} 個', + }, + ClientAI: { + deepThink: '深度思考', + loading: '載入中', + loadingProgress: '模型載入進度', + loadError: '載入錯誤', + inputPlaceholder: '輸入訊息...', + roleUser: '用戶', + roleAssistant: 'AI 助手', + roleSystem: '系統', } }; diff --git a/packages/semi-ui/package.json b/packages/semi-ui/package.json index 9df65e0ef3..9e6e5935c4 100644 --- a/packages/semi-ui/package.json +++ b/packages/semi-ui/package.json @@ -4,18 +4,68 @@ "description": "A modern, comprehensive, flexible design system and UI library. Connect DesignOps & DevOps. Quickly build beautiful React apps. 
Maintained by Douyin-fe team.", "main": "lib/cjs/index.js", "module": "lib/es/index.js", - "typings": "lib/es/index.d.ts", + "types": "lib/es/index.d.ts", + "exports": { + ".": { + "types": "./lib/es/index.d.ts", + "import": "./lib/es/index.js", + "require": "./lib/cjs/index.js" + }, + "./*": { + "types": "./lib/es/*/index.d.ts", + "import": "./lib/es/*/index.js", + "require": "./lib/cjs/*/index.js" + }, + "./lib/es/*": { + "types": [ + "./lib/es/*.d.ts", + "./lib/es/*/index.d.ts" + ], + "import": "./lib/es/*.js", + "default": "./lib/es/*.js" + }, + "./lib/cjs/*": { + "types": [ + "./lib/cjs/*.d.ts", + "./lib/cjs/*/index.d.ts" + ], + "require": "./lib/cjs/*.js", + "default": "./lib/cjs/*.js" + } + }, "scripts": { - "clean": "rimraf dist lib", + "clean": "rimraf dist lib content", + "clean:content": "rimraf content", "build:lib": "node ./scripts/compileLib.js", "build:js": "node scripts/compileDist.js", "build:css": "node scripts/compileScss.js", - "prepublishOnly": "npm run clean && npm run build:lib && npm run build:js && npm run build:css" + "copy:content": "node scripts/copyContent.js", + "prepublishOnly": "npm run clean && npm run build:lib && npm run build:js && npm run build:css && npm run copy:content", + "postpublish": "npm run clean:content" }, "files": [ "dist/*", - "lib/*" + "lib/*", + "content/**/*", + "**/*.ts", + "**/*.tsx", + "**/*.scss", + "!**/_story/**", + "!**/__test__/**", + "!**/_test_/**", + "!**/node_modules/**", + "!**/*.test.ts", + "!**/*.test.tsx", + "!**/*.spec.ts", + "!**/*.spec.tsx" ], + "typesVersions": { + "*": { + "*": [ + "lib/es/*" + ] + } + }, "dependencies": { "@dnd-kit/core": "^6.0.8", "@dnd-kit/sortable": "^7.0.2", @@ -52,7 +102,8 @@ }, "peerDependencies": { "react": ">=16.0.0", - "react-dom": ">=16.0.0" + "react-dom": ">=16.0.0", + "@mlc-ai/web-llm": "^0.2.80" }, "sideEffects": [ "*.scss", diff --git a/packages/semi-ui/scripts/copyContent.js b/packages/semi-ui/scripts/copyContent.js new file mode 100644 index 
0000000000..348a1ff9df --- /dev/null +++ b/packages/semi-ui/scripts/copyContent.js @@ -0,0 +1,49 @@ +const fs = require('fs'); +const path = require('path'); + +// 排除的文件(工具脚本,不需要发布) +const excludeFiles = ['order.js', 'makeLn.js', 'rename.py']; + +function copyDirectory(src, dest) { + // 确保目标目录存在 + if (!fs.existsSync(dest)) { + fs.mkdirSync(dest, { recursive: true }); + } + + // 读取源目录内容 + const items = fs.readdirSync(src); + + items.forEach(item => { + // 跳过排除的文件 + if (excludeFiles.includes(item)) { + return; + } + + const srcPath = path.join(src, item); + const destPath = path.join(dest, item); + const stat = fs.statSync(srcPath); + + if (stat.isDirectory()) { + // 递归复制子目录 + copyDirectory(srcPath, destPath); + } else { + // 复制文件 + fs.copyFileSync(srcPath, destPath); + } + }); +} + +// 获取项目根目录(从 packages/semi-ui/scripts 向上两级) +const projectRoot = path.resolve(__dirname, '../../..'); +const contentSrc = path.join(projectRoot, 'content'); +const contentDest = path.join(__dirname, '..', 'content'); + +// 如果 content 目录存在,则复制 +if (fs.existsSync(contentSrc)) { + console.log('正在复制 content 目录...'); + copyDirectory(contentSrc, contentDest); + console.log('content 目录复制完成'); +} else { + console.warn(`警告: 未找到 content 目录: ${contentSrc}`); +} + diff --git a/src/images/docIcons/doc-clientAI.svg b/src/images/docIcons/doc-clientAI.svg new file mode 100644 index 0000000000..0474bdce59 --- /dev/null +++ b/src/images/docIcons/doc-clientAI.svg @@ -0,0 +1,29 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/yarn.lock b/yarn.lock index 5405e43df5..311c90646f 100644 --- a/yarn.lock +++ b/yarn.lock @@ -1585,11 +1585,25 @@ "@douyinfe/semi-animation-styled" "2.65.0" classnames "^2.2.6" +"@douyinfe/semi-animation-react@2.89.2-alpha.2": + version "2.89.2-alpha.2" + resolved "https://registry.yarnpkg.com/@douyinfe/semi-animation-react/-/semi-animation-react-2.89.2-alpha.2.tgz#dc5000b81ef7350ba25648fb256e2d7f43ce8764" + integrity 
sha512-HTv/u2dYY3+43yaLB9DCm4U0fFSw6lhGLyKHHRasG99xDpo7LiI/h9JJK5+qBssJxHYnGc2QZIpldjOs5Gc9IQ== + dependencies: + "@douyinfe/semi-animation" "2.89.2-alpha.2" + "@douyinfe/semi-animation-styled" "2.89.2-alpha.2" + classnames "^2.2.6" + "@douyinfe/semi-animation-styled@2.65.0": version "2.65.0" resolved "https://registry.yarnpkg.com/@douyinfe/semi-animation-styled/-/semi-animation-styled-2.65.0.tgz#8c56047a5704a45b05cc9809a2a126cc24526ea1" integrity sha512-YFF8Ptcz/jwS0phm28XZV7ROqMQ233sjVR0Uy33FImCITr6EAPe5wcCeEmzVZoYS7x3tUFR30SF+0hSO01rQUg== +"@douyinfe/semi-animation-styled@2.89.2-alpha.2": + version "2.89.2-alpha.2" + resolved "https://registry.yarnpkg.com/@douyinfe/semi-animation-styled/-/semi-animation-styled-2.89.2-alpha.2.tgz#9550ca8bf8144d5af18aef019a77b6ccce313652" + integrity sha512-2qAWbs+Zrz5oGfiZcvgwzpOvO+Sf9rAgOl+Y7/ViRt51Z3tSY4jnrO/64Wv/c1bC8e0Z32vpWxLze7FAZVZFdQ== + "@douyinfe/semi-animation@2.65.0": version "2.65.0" resolved "https://registry.yarnpkg.com/@douyinfe/semi-animation/-/semi-animation-2.65.0.tgz#f544a6b420c3e948c09836019e6b63f1382cd12c" @@ -1597,6 +1611,13 @@ dependencies: bezier-easing "^2.1.0" +"@douyinfe/semi-animation@2.89.2-alpha.2": + version "2.89.2-alpha.2" + resolved "https://registry.yarnpkg.com/@douyinfe/semi-animation/-/semi-animation-2.89.2-alpha.2.tgz#b25b643cdebab828d5657efc4172c21a175e0efc" + integrity sha512-dhRJ73WHJJmzL6ftt2+V34Th+RBwo2UMzKjstVK7VudDmoMKruBNVp5KM3tKS2kIjOGmr7RNzgBRkRA42blmpw== + dependencies: + bezier-easing "^2.1.0" + "@douyinfe/semi-foundation@2.65.0": version "2.65.0" resolved "https://registry.yarnpkg.com/@douyinfe/semi-foundation/-/semi-foundation-2.65.0.tgz#20466a9b4baacdde2249930fb709ba035c5a7bea" @@ -1616,6 +1637,26 @@ remark-gfm "^4.0.0" scroll-into-view-if-needed "^2.2.24" +"@douyinfe/semi-foundation@2.89.2-alpha.2": + version "2.89.2-alpha.2" + resolved "https://registry.yarnpkg.com/@douyinfe/semi-foundation/-/semi-foundation-2.89.2-alpha.2.tgz#28363d99069070210f06755682caa7b50314497d" + 
integrity sha512-d0/wgdNL9/DPjcbuzBtkhdeGhzuSBdoADmM2eZLadZEbtyb4+6Orh7txvOfDTJ5OZMCrWYwSAl7m8Wizoi+/6A== + dependencies: + "@douyinfe/semi-animation" "2.89.2-alpha.2" + "@douyinfe/semi-json-viewer-core" "2.89.2-alpha.2" + "@mdx-js/mdx" "^3.0.1" + async-validator "^3.5.0" + classnames "^2.2.6" + date-fns "^2.29.3" + date-fns-tz "^1.3.8" + fast-copy "^3.0.1 " + lodash "^4.17.21" + lottie-web "^5.12.2" + memoize-one "^5.2.1" + prismjs "^1.29.0" + remark-gfm "^4.0.0" + scroll-into-view-if-needed "^2.2.24" + "@douyinfe/semi-icons@2.65.0", "@douyinfe/semi-icons@latest": version "2.65.0" resolved "https://registry.yarnpkg.com/@douyinfe/semi-icons/-/semi-icons-2.65.0.tgz#af39cbd5431ebccedcf7d9ce689646e54bebc432" @@ -1623,11 +1664,30 @@ dependencies: classnames "^2.2.6" +"@douyinfe/semi-icons@2.89.2-alpha.2": + version "2.89.2-alpha.2" + resolved "https://registry.yarnpkg.com/@douyinfe/semi-icons/-/semi-icons-2.89.2-alpha.2.tgz#e5dc0f7b4ed506e0324f339d4c065a5776453602" + integrity sha512-hY8zZwmnUZmXN5cqRGsSI22Kd49x82fav184QZPJ1grE2jpkZMNtCgNe984RCgQSHh1RQqkPhrK1a5p8HrzDrw== + dependencies: + classnames "^2.2.6" + "@douyinfe/semi-illustrations@2.65.0": version "2.65.0" resolved "https://registry.yarnpkg.com/@douyinfe/semi-illustrations/-/semi-illustrations-2.65.0.tgz#9916c540c91222a1d9f48cd34a941d28b8a05d2f" integrity sha512-1IhOztyBYiSu8WrcvN+oWWtcJTC9+x6zbnYtufx4ToISs5UO1te1PQofABpkDzIJYFtW9yYLxg4uoL4wGjqYMA== +"@douyinfe/semi-illustrations@2.89.2-alpha.2": + version "2.89.2-alpha.2" + resolved "https://registry.yarnpkg.com/@douyinfe/semi-illustrations/-/semi-illustrations-2.89.2-alpha.2.tgz#50f2de49990d2e31e6c84b2e2b67d611126491b5" + integrity sha512-ouQRXZo/rVfyXOqkJY+cmlU1DlLCIDmxRE5eq1GmSAl/yAJCmJXGjo65KS0kGFEmHqgNp7u4N8Ok1QwWUOCUkA== + +"@douyinfe/semi-json-viewer-core@2.89.2-alpha.2": + version "2.89.2-alpha.2" + resolved 
"https://registry.yarnpkg.com/@douyinfe/semi-json-viewer-core/-/semi-json-viewer-core-2.89.2-alpha.2.tgz#21340444e9b19bbccdd9afe25b3bdf034a8dd033" + integrity sha512-OtdgCExSja6tYhODvISsD2yW2me9SanmfXi0205F6vzCCpj5RP+ACGUgAcSjGiPTBxti+Occs06QCzd8jRh1Xg== + dependencies: + jsonc-parser "^3.3.1" + "@douyinfe/semi-scss-compile@2.23.2": version "2.23.2" resolved "https://registry.yarnpkg.com/@douyinfe/semi-scss-compile/-/semi-scss-compile-2.23.2.tgz#30884bb194ee9ae1e81877985e5663c3297c1ced" @@ -1699,6 +1759,49 @@ resolved "https://registry.yarnpkg.com/@douyinfe/semi-theme-default/-/semi-theme-default-2.61.0.tgz#a7e9bf9534721c12af1d0eeb5d5a2de615896a23" integrity sha512-obn/DOw4vZyKFAlWvZxHTpBLAK9FO9kygTSm2GROgvi+UDB2PPU6l20cuUCsdGUNWJRSqYlTTVZ1tNYIyFZ5Sg== +"@douyinfe/semi-theme-default@2.89.2-alpha.2": + version "2.89.2-alpha.2" + resolved "https://registry.yarnpkg.com/@douyinfe/semi-theme-default/-/semi-theme-default-2.89.2-alpha.2.tgz#3a074f74b7e31a8309fa2125deae53b68f7d2914" + integrity sha512-k9eJuybkqt3EbWTK7Y2BBVwfJazyebyWXD87fxyC0VgqS1t2sxvmzyssAk2fKPRsUEvEulWlQ4NB+W4Xn8BWeQ== + +"@douyinfe/semi-ui@^2.0.0": + version "2.89.1" + resolved "https://registry.yarnpkg.com/@douyinfe/semi-ui/-/semi-ui-2.89.1.tgz#c0573f2531fe24d4830db9331e99295d75af2207" + integrity sha512-1jh9c1NwPEHYIPt+vCSF/TeXrhJ6g6/5745h8x2VZMA6+Epexv61eucTiMEz/HZzi9GC/u+K0GuwSS3CjJdEFA== + dependencies: + "@dnd-kit/core" "^6.0.8" + "@dnd-kit/sortable" "^7.0.2" + "@dnd-kit/utilities" "^3.2.1" + "@douyinfe/semi-animation" "2.89.1" + "@douyinfe/semi-animation-react" "2.89.1" + "@douyinfe/semi-foundation" "2.89.1" + "@douyinfe/semi-icons" "2.89.1" + "@douyinfe/semi-illustrations" "2.89.1" + "@douyinfe/semi-theme-default" "2.89.1" + "@tiptap/core" "^3.10.7" + "@tiptap/extension-document" "^3.10.7" + "@tiptap/extension-hard-break" "^3.10.7" + "@tiptap/extension-mention" "^3.10.7" + "@tiptap/extension-paragraph" "^3.10.7" + "@tiptap/extension-text" "^3.10.7" + "@tiptap/extensions" "^3.10.7" + 
"@tiptap/pm" "^3.10.7" + "@tiptap/react" "^3.10.7" + async-validator "^3.5.0" + classnames "^2.2.6" + copy-text-to-clipboard "^2.1.1" + date-fns "^2.29.3" + date-fns-tz "^1.3.8" + fast-copy "^3.0.1 " + jsonc-parser "^3.3.1" + lodash "^4.17.21" + prop-types "^15.7.2" + prosemirror-state "^1.4.3" + react-resizable "^3.0.5" + react-window "^1.8.2" + scroll-into-view-if-needed "^2.2.24" + utility-types "^3.10.0" + "@douyinfe/semi-ui@latest": version "2.65.0" resolved "https://registry.yarnpkg.com/@douyinfe/semi-ui/-/semi-ui-2.65.0.tgz#295eb0dd8e9e961adb4ddd7c7bbce3468d1b7430" @@ -3722,6 +3825,13 @@ resolved "https://registry.yarnpkg.com/@mikaelkristiansson/domready/-/domready-1.0.11.tgz#6a4b5891dccb6402ff4e944de843036ee1ffd4f5" integrity sha512-nEBLOa0JgtqahmPrnJZ18epLiFBzxhdKgo4uhN3TaBFRmM30pEVrS9FAEV4tg92d8PTdU+dYQx2lnpPyFMgMcg== +"@mlc-ai/web-llm@^0.2.80": + version "0.2.80" + resolved "https://registry.yarnpkg.com/@mlc-ai/web-llm/-/web-llm-0.2.80.tgz#399931617ebb97456d0fd24b84ac2aa6d65f5cbf" + integrity sha512-Hwy1OCsK5cOU4nKr2wIJ2qA1g595PENtO5f2d9Wd/GgFsj5X04uxfaaJfqED8eFAJOpQpn/DirogdEY/yp5jQg== + dependencies: + loglevel "^1.9.1" + "@monaco-editor/loader@1.0.0": version "1.0.0" resolved "https://registry.yarnpkg.com/@monaco-editor/loader/-/loader-1.0.0.tgz#c7ea78ef07cebcae83d92bbfe2bddab563468102" @@ -18006,6 +18116,11 @@ loglevel@^1.6.8: resolved "https://registry.yarnpkg.com/loglevel/-/loglevel-1.9.1.tgz#d63976ac9bcd03c7c873116d41c2a85bafff1be7" integrity sha512-hP3I3kCrDIMuRwAwHltphhDM1r8i55H33GgqjXbrisuJhF4kRhW1dNuxsRklp4bXl8DSdLaNLuiL4A/LWRfxvg== +loglevel@^1.9.1: + version "1.9.2" + resolved "https://registry.yarnpkg.com/loglevel/-/loglevel-1.9.2.tgz#c2e028d6c757720107df4e64508530db6621ba08" + integrity sha512-HgMmCqIJSAKqo68l0rS2AanEWfkxaZ5wNiEFb5ggm08lDs9Xl2KxBlX3PTcaD2chBM1gXAYf491/M2Rv8Jwayg== + lolex@^2.7.5: version "2.7.5" resolved "https://registry.yarnpkg.com/lolex/-/lolex-2.7.5.tgz#113001d56bfc7e02d56e36291cc5c413d1aa0733"