
Commit 30180c4

EndlessJour9527 authored and yorkie committed
lib: add threepio to start using LLM for rendering contents progressively (#75)
1 parent e3b21aa commit 30180c4

30 files changed: +1523 −17 lines

docs/development-zh.md

Lines changed: 21 additions & 0 deletions
@@ -138,6 +138,27 @@ $ adb shell setprop jsar.renderer.target_app_fps 60
 The default is 45 FPS.
 
+**Using an LLM**
+
+```sh
+## Set the model ID used with the LLM API, e.g. `qwen-plus-latest`
+$ adb shell setprop jsar.threepio.api.modelid your-llm-modelid
+## Set the organization/platform serving the LLM API, e.g. `qwen`
+$ adb shell setprop jsar.threepio.api.provider your-llm-provider
+## Set the key for the LLM API service
+$ adb shell setprop jsar.threepio.api.key your-api-key
+## Set the LLM API service endpoint, e.g. `https://dashscope.aliyuncs.com/compatible-mode/v1`
+$ adb shell setprop jsar.threepio.api.endpoint your-api-endpoint
+```
+
+Taking the [Qwen model](https://github.com/QwenLM/Qwen) as an example:
+```sh
+$ adb shell setprop jsar.threepio.api.modelid qwen-plus-latest
+$ adb shell setprop jsar.threepio.api.provider qwen
+$ adb shell setprop jsar.threepio.api.key your-api-key
+$ adb shell setprop jsar.threepio.api.endpoint https://dashscope.aliyuncs.com/compatible-mode/v1
+```
+
 ### Debugging with Chrome DevTools
 
 Find the following in the app process log:

lib/bindings/env.ts

Lines changed: 32 additions & 0 deletions
@@ -97,3 +97,35 @@ export function printSummary() {
  * Create a 500ms interval timer to send keep-alive updates. It will hang while the script is busy.
  */
 setInterval(() => nativeContext.keepAlive(), 500);
+
+/**
+ * @returns The LLM API key.
+ */
+export const getThreepioApiKey = (): string => {
+  return process.env.JSAR_THREEPIO_API_KEY as string;
+};
+
+/**
+ * @returns The LLM API provider name, such as 'doubao', 'qwen', etc.
+ */
+export const getThreepioApiProvider = (): string => {
+  return process.env.JSAR_THREEPIO_API_PROVIDER as string;
+};
+
+/**
+ * @returns The LLM API model ID, such as 'gpt-4', 'qwen-7b', etc.
+ */
+export const getThreepioApiModelId = (): string => {
+  return process.env.JSAR_THREEPIO_API_MODELID as string;
+};
+
+/**
+ * @returns The LLM API endpoint, such as 'https://api.doubao.com/v1/chat/completions', etc.
+ */
+export const getThreepioApiEndpoint = (): string => {
+  return process.env.JSAR_THREEPIO_API_ENDPOINT as string;
+};
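
These getters read `JSAR_THREEPIO_*` environment variables that correspond to the `jsar.threepio.api.*` system properties from the docs change above. The property-to-env bridging is not part of this file, so the naming rule in this sketch is an assumption:

```ts
// Hypothetical illustration (not from this commit): how a system property name
// like `jsar.threepio.api.key` could map onto the JSAR_THREEPIO_API_KEY
// variable read by the getters above.
function propToEnvName(prop: string): string {
  return prop.replace(/\./g, '_').toUpperCase();
}

console.log(propToEnvName('jsar.threepio.api.key')); // 'JSAR_THREEPIO_API_KEY'
```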

lib/runtime2/index.ts

Lines changed: 10 additions & 4 deletions
@@ -10,6 +10,7 @@ import { ResourceLoaderOnTransmute } from './jsardom/ResourceLoader';
 import createModel3dViewer from './viewers/model3d'; // glb, gltf ...
 import createImage2dViewer from './viewers/image2d'; // png, jpg, etc ...
 import createSplineDesignViewer from './viewers/splinedesign'; // splinedesign
+import { Threepio } from './threepio';
 
 Object.defineProperty(BABYLON.PrecisionDate, 'Now', {
   get: () => getPerformanceNow(),
@@ -64,7 +65,7 @@ async function evaluateXSML(gl: WebGLRenderingContext | WebGL2RenderingContext,
 export class TransmuteRuntime2 extends EventTarget {
   #browsingContext: Transmute.BrowsingContext;
-
+  #threepio: Threepio;
   constructor(private gl: WebGLRenderingContext | WebGL2RenderingContext, private id: number) {
     super();
     {
@@ -86,13 +87,18 @@ export class TransmuteRuntime2 extends EventTarget {
       const browsingContext = new BrowsingContext();
       browsingContext.setResourceLoader(new ResourceLoaderOnTransmute());
       this.#browsingContext = browsingContext;
+      this.#threepio = new Threepio(browsingContext);
     }
     this.dispatchEvent(new Event('rendererReady'));
   }
 
-  async start(url: string) {
-    console.info(`Content(#${this.id}): receiving a document request: ${url}`);
-    await this.load(url);
+  async start(input: string) {
+    console.info(`Content(#${this.id}): receiving a document request: ${input}`);
+    if (input.startsWith('http:') || input.startsWith('https:') || input.startsWith('/')) {
+      await this.load(input);
+    } else {
+      await this.#threepio.request(input);
+    }
   }
 
   private async load(codeOrUrl: string, urlBase?: string) {
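
`start` now accepts either a document location or free-form text: URL-like inputs (beginning with `http:`, `https:`, or `/`) go through the existing `load` path, while anything else is forwarded to Threepio as an LLM request. A minimal standalone sketch of that routing predicate (example inputs are hypothetical):

```ts
// Mirrors the routing rule in TransmuteRuntime2.start(): URL-like inputs
// load a document; everything else becomes an LLM request.
function isDocumentRequest(input: string): boolean {
  return input.startsWith('http:') || input.startsWith('https:') || input.startsWith('/');
}

console.log(isDocumentRequest('https://example.com/scene.xsml')); // true
console.log(isDocumentRequest('a rotating red cube'));            // false (goes to Threepio)
```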

lib/runtime2/threepio/api/index.ts

Lines changed: 33 additions & 0 deletions
@@ -0,0 +1,33 @@
+import type { Anthropic } from '@anthropic-ai/sdk';
+import { ApiConfiguration, MoudleInfo } from '../shared/api';
+import { ApiStream, ApiStreamUsageChunk } from './transform/stream';
+import { DoubaoHandler } from './providers/doubao';
+import { QwenHandler } from './providers/qwen';
+
+export type LLMMessageParam = Anthropic.Messages.MessageParam;
+
+/**
+ * Interface defining the API for a specific large language model in the cloud.
+ * @interface ApiHandler
+ */
+export interface ApiHandler {
+  // `createMessage` sends a system prompt and a list of messages to the model, returning a streaming response.
+  createMessage(systemPrompt: string, messages: LLMMessageParam[]): ApiStream;
+  // Retrieves the model's identifier and related information.
+  getModel(): { id: string; info: MoudleInfo };
+  // (Optional) Returns a promise that resolves to an `ApiStreamUsageChunk` with usage data, or `undefined` if not available.
+  getApiStreamUsage?(): Promise<ApiStreamUsageChunk | undefined>;
+}
+
+export function buildApiHandler(configuration: ApiConfiguration): ApiHandler {
+  const { apiProvider, ...options } = configuration;
+  switch (apiProvider) {
+    case 'doubao':
+      return new DoubaoHandler(options);
+    case 'qwen':
+      return new QwenHandler(options);
+    default:
+      throw new Error(`Unsupported API provider: ${apiProvider}`);
+  }
+}
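
A hypothetical wiring sketch (not part of this commit) showing how `buildApiHandler` could be fed from the env getters added in `lib/bindings/env.ts`. Import paths are written from the repo root for illustration, and the assumption is that `ApiConfiguration` carries exactly the fields the providers read:

```ts
import { buildApiHandler } from './lib/runtime2/threepio/api';
import type { ApiConfiguration } from './lib/runtime2/threepio/shared/api';
import {
  getThreepioApiKey,
  getThreepioApiProvider,
  getThreepioApiModelId,
  getThreepioApiEndpoint,
} from './lib/bindings/env';

// `apiProvider` selects the handler; the remaining fields are spread into
// ApiHandlerOptions, whose `endpoint`, `apiKey`, and `apiModelId` fields are
// the ones the providers actually use.
const handler = buildApiHandler({
  apiProvider: getThreepioApiProvider(), // 'doubao' or 'qwen'
  endpoint: getThreepioApiEndpoint(),
  apiKey: getThreepioApiKey(),
  apiModelId: getThreepioApiModelId(),
} as ApiConfiguration);
```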
lib/runtime2/threepio/api/providers/doubao.ts

Lines changed: 71 additions & 0 deletions
@@ -0,0 +1,71 @@
+import OpenAI from 'openai';
+import { ApiHandler, LLMMessageParam } from '..';
+import {
+  MoudleInfo,
+  LLMModelId,
+  ApiHandlerOptions,
+  LLMModelToParameters,
+} from '../../shared/api';
+import { convertToOpenAiMessages } from '../transform/openaiFormat';
+import { ApiStream } from '../transform/stream';
+
+export class DoubaoHandler implements ApiHandler {
+  #options: ApiHandlerOptions;
+  #client: OpenAI;
+
+  constructor(options: ApiHandlerOptions) {
+    this.#options = options;
+    this.#client = new OpenAI({
+      baseURL: options.endpoint,
+      apiKey: options.apiKey,
+    });
+  }
+
+  getModel(): { id: LLMModelId; info: MoudleInfo } {
+    const modelId = this.#options.apiModelId;
+    if (modelId in LLMModelToParameters) {
+      const id = modelId as LLMModelId;
+      return { id, info: LLMModelToParameters[id] };
+    } else {
+      throw new Error(`Model ID ${modelId} is not supported.`);
+    }
+  }
+
+  async *createMessage(systemPrompt: string, messages: LLMMessageParam[]): ApiStream {
+    const model = this.getModel();
+    const openAiMessages: OpenAI.Chat.ChatCompletionMessageParam[] = [
+      { role: 'system', content: systemPrompt },
+      ...convertToOpenAiMessages(messages),
+    ];
+    const stream = await this.#client.chat.completions.create({
+      model: model.id,
+      max_completion_tokens: model.info.max_completion_tokens,
+      messages: openAiMessages,
+      stream: true,
+      stream_options: { include_usage: true },
+      temperature: 0,
+    });
+
+    for await (const chunk of stream) {
+      const delta = chunk.choices[0]?.delta;
+      if (delta?.content) {
+        yield {
+          type: 'text',
+          text: delta.content,
+        };
+      }
+
+      if (chunk.usage) {
+        yield {
+          type: 'usage',
+          inputTokens: chunk.usage.prompt_tokens || 0,
+          outputTokens: chunk.usage.completion_tokens || 0,
+          // @ts-ignore-next-line
+          cacheReadTokens: chunk.usage.prompt_cache_hit_tokens || 0,
+          // @ts-ignore-next-line
+          cacheWriteTokens: chunk.usage.prompt_cache_miss_tokens || 0,
+        };
+      }
+    }
+  }
+}
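
`createMessage` is an async generator, so callers drain it with `for await`. A minimal consumption sketch, assuming the stream yields only the `text` and `usage` chunk shapes shown above; the import path, system prompt, and helper function are hypothetical:

```ts
import { ApiHandler, LLMMessageParam } from './lib/runtime2/threepio/api';

// Concatenate streamed 'text' chunks and log the 'usage' chunk when it arrives.
async function collectText(handler: ApiHandler, userPrompt: string): Promise<string> {
  const messages: LLMMessageParam[] = [{ role: 'user', content: userPrompt }];
  let text = '';
  for await (const chunk of handler.createMessage('Render the user request.', messages)) {
    if (chunk.type === 'text') {
      text += chunk.text;
    } else if (chunk.type === 'usage') {
      console.info(`tokens: ${chunk.inputTokens} in / ${chunk.outputTokens} out`);
    }
  }
  return text;
}
```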
lib/runtime2/threepio/api/providers/qwen.ts

Lines changed: 72 additions & 0 deletions
@@ -0,0 +1,72 @@
+import OpenAI from 'openai';
+import { ApiHandler, LLMMessageParam } from '..';
+import {
+  ApiHandlerOptions,
+  LLMModelToParameters,
+  MoudleInfo,
+  LLMModelId,
+} from '../../shared/api';
+import { convertToOpenAiMessages } from '../transform/openaiFormat';
+import { ApiStream } from '../transform/stream';
+
+export class QwenHandler implements ApiHandler {
+  #options: ApiHandlerOptions;
+  #client: OpenAI;
+
+  constructor(options: ApiHandlerOptions) {
+    this.#options = options;
+    this.#client = new OpenAI({
+      baseURL: options.endpoint,
+      apiKey: options.apiKey,
+    });
+  }
+
+  getModel(): { id: LLMModelId; info: MoudleInfo } {
+    const modelId = this.#options.apiModelId as LLMModelId;
+    if (!modelId) {
+      throw new Error('Model ID is not provided in the options.');
+    }
+    return {
+      id: modelId,
+      info: LLMModelToParameters[modelId],
+    };
+  }
+
+  async *createMessage(systemPrompt: string, messages: LLMMessageParam[]): ApiStream {
+    const model = this.getModel();
+    const openAiMessages: OpenAI.Chat.ChatCompletionMessageParam[] = [
+      { role: 'system', content: systemPrompt },
+      ...convertToOpenAiMessages(messages),
+    ];
+    const stream = await this.#client.chat.completions.create({
+      model: model.id,
+      max_completion_tokens: model.info.max_completion_tokens,
+      messages: openAiMessages,
+      stream: true,
+      stream_options: { include_usage: true },
+      temperature: 0,
+    });
+
+    for await (const chunk of stream) {
+      const delta = chunk.choices[0]?.delta;
+      if (delta?.content) {
+        yield {
+          type: 'text',
+          text: delta.content,
+        };
+      }
+
+      if (chunk.usage) {
+        yield {
+          type: 'usage',
+          inputTokens: chunk.usage.prompt_tokens || 0,
+          outputTokens: chunk.usage.completion_tokens || 0,
+          // @ts-ignore-next-line
+          cacheReadTokens: chunk.usage.prompt_cache_hit_tokens || 0,
+          // @ts-ignore-next-line
+          cacheWriteTokens: chunk.usage.prompt_cache_miss_tokens || 0,
+        };
+      }
+    }
+  }
+}
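
`QwenHandler` is nearly identical to `DoubaoHandler`: both speak the OpenAI-compatible chat-completions protocol through the `openai` client with a custom `baseURL`, differing only in how they validate the model ID. A sketch of constructing it directly, using the DashScope values from the docs example above (assuming `ApiHandlerOptions` requires only these fields):

```ts
// Direct construction against DashScope's OpenAI-compatible endpoint.
const qwen = new QwenHandler({
  apiModelId: 'qwen-plus-latest',
  apiKey: process.env.JSAR_THREEPIO_API_KEY as string,
  endpoint: 'https://dashscope.aliyuncs.com/compatible-mode/v1',
});
```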
