Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 4 additions & 0 deletions cleanup-index-js.sh
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,10 @@
sed -e s/"const{createRequire:createRequire}=await import('module');"//g -i .backup lib/index.js
sed -e s/"const{createRequire:createRequire}=await import('module');"//g -i .backup lib/index.js.map

# Replace scriptDirectory init that Parcel cannot resolve ("new URL('./', import.meta.url)") with a plain relative string
sed -e s~"require(\\\"url\\\").fileURLToPath(new URL(\\\"\\.\\/\\\",import.meta.url))"~"\\\"./\\\""~g -i .backup lib/index.js
sed -e s~"require(\\\"url\\\").fileURLToPath(new URL(\\\"\\.\\/\\\",import.meta.url))"~'\\\".\\\"'~g -i .backup lib/index.js.map

# Replace string "new (require('u' + 'rl').URL)('file:' + __filename).href" with "MLC_DUMMY_PATH"
# This is required for building nextJS projects -- its compile time would complain about `require()`
# See https://github.com/mlc-ai/web-llm/issues/383 and the fixing PR's description for more.
Expand Down
11 changes: 11 additions & 0 deletions examples/structural-tag-tool-use/README.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,11 @@
# Structural tag MCP-style tool calls

Run `npm install`, then `npm start` to launch a minimal page that prints progress and logs to the browser console.

This example demonstrates how to:

- Define a structural tag that forces an MCP-style `<tool_call>...</tool_call>` block with `{"name": ..., "arguments": ...}` payloads.
- Ask WebLLM for a tool call with `response_format.type = "structural_tag"`, parse the call, and dispatch to a stubbed tool implementation.
- Send the tool result back via a `tool` message and request a final natural-language answer.

Open the console to see the enforced tool call, the stubbed tool response, and the final assistant reply.
20 changes: 20 additions & 0 deletions examples/structural-tag-tool-use/package.json
Original file line number Diff line number Diff line change
@@ -0,0 +1,20 @@
{
"name": "structural-tag-tool-use",
"version": "0.1.0",
"private": true,
"scripts": {
"start": "parcel src/mcp_structural_tag.html --port 8887",
"build": "parcel build src/mcp_structural_tag.html --dist-dir lib"
},
"devDependencies": {
"buffer": "^5.7.1",
"parcel": "2.8.3",
"process": "^0.11.10",
"tslib": "^2.3.1",
"typescript": "^4.9.5",
"url": "^0.11.3"
},
"dependencies": {
"@mlc-ai/web-llm": "file:../.."
}
}
16 changes: 16 additions & 0 deletions examples/structural-tag-tool-use/src/mcp_structural_tag.html
Original file line number Diff line number Diff line change
@@ -0,0 +1,16 @@
<!doctype html>
<html>
<!-- NOTE(review): webLLMGlobal is not read anywhere in this page or the
     companion TS module; presumably some WebLLM build expects it on the
     global scope before the module loads — confirm and drop if unused. -->
<script>
webLLMGlobal = {};
</script>
<body>
<h2>Structural tag MCP-style tool calls</h2>
<p>
Open the console to see the enforced tool call, tool response, and final
reply.
</p>
<!-- Init progress text is written here by setLabel() in the TS module. -->
<label id="init-label"></label>
<!-- appendLog() in the TS module appends one line per event here. -->
<pre id="log"></pre>
<script type="module" src="./mcp_structural_tag.ts"></script>
</body>
</html>
225 changes: 225 additions & 0 deletions examples/structural-tag-tool-use/src/mcp_structural_tag.ts
Original file line number Diff line number Diff line change
@@ -0,0 +1,225 @@
import * as webllm from "@mlc-ai/web-llm";

/** One tool call parsed out of an assistant `<tool_call>` block. */
type ToolInvocation = {
  name: string;
  arguments: Record<string, unknown>;
};

/**
 * A demo tool: its name, a human-readable description (advertised in the
 * system prompt), and the JSON schema its `arguments` object must satisfy.
 */
type ToolDefinition = {
  name: string;
  description: string;
  schema: Record<string, unknown>;
};

// Demo tool catalog. Each entry's `schema` constrains the JSON `arguments`
// object the model may emit for that tool; the stub implementations live in
// runTool() below.
const tools: ToolDefinition[] = [
  {
    name: "get_weather",
    description: "Fetch an approximate weather report for a city.",
    schema: {
      type: "object",
      properties: {
        location: { type: "string", description: "City name, e.g. Tokyo" },
        unit: {
          type: "string",
          enum: ["celsius", "fahrenheit"],
          description: "Temperature unit",
        },
      },
      // `unit` is optional; runTool() defaults it to celsius.
      required: ["location"],
    },
  },
  {
    name: "get_time",
    description: "Return the current time in a given IANA timezone.",
    schema: {
      type: "object",
      properties: {
        timezone: {
          type: "string",
          description: "IANA timezone name, defaults to UTC",
        },
      },
      required: [],
    },
  },
];

// Structural-tag response format: one tag per tool, each forcing the exact
// wrapper `<tool_call>\n{"name": "<tool>", "arguments": <schema-valid JSON>}\n</tool_call>`.
const mcpStructuralTag = {
  type: "structural_tag",
  format: {
    type: "triggered_tags",
    // Constrained decoding kicks in once the model emits this trigger text.
    triggers: ["<tool_call>"],
    tags: tools.map((tool) => ({
      begin: `<tool_call>\n{"name": "${tool.name}", "arguments": `,
      // The arguments payload must validate against the tool's JSON schema.
      content: { type: "json_schema", json_schema: tool.schema },
      end: "}\n</tool_call>",
    })),
    // NOTE(review): field names suggest "require at least one tag" and
    // "allow multiple tool calls per reply" — confirm against WebLLM's
    // structural-tag documentation.
    at_least_one: true,
    stop_after_first: false,
  },
} as const;

/** Mirrors each model-loading progress report into the on-page status label. */
const initProgressCallback = (progress: webllm.InitProgressReport) => {
  setLabel("init-label", progress.text);
};

/**
 * Writes `text` into the DOM element with the given id.
 * Throws if no element with that id exists in the page.
 */
function setLabel(id: string, text: string) {
  const target = document.getElementById(id);
  if (target != null) {
    target.innerText = text;
    return;
  }
  throw Error("Cannot find label " + id);
}

/**
 * Logs `text` to the browser console and, when present, appends it as a new
 * line to the on-page <pre id="log"> element.
 */
function appendLog(text: string) {
  console.log(text);
  const sink = document.getElementById("log");
  if (sink == null) {
    return;
  }
  sink.textContent += text + "\n";
}

/**
 * Extracts every MCP-style tool invocation from an assistant reply.
 *
 * Scans `content` for `<tool_call>{...}</tool_call>` blocks and returns the
 * payloads whose JSON body has a string `name` and an object `arguments`.
 * Blocks with malformed JSON or a wrong payload shape are skipped rather
 * than aborting the whole parse (the model may emit stray text around valid
 * calls).
 *
 * @param content Raw assistant message content.
 * @returns The parsed invocations, in order of appearance.
 * @throws Error when `content` is empty/nullish or no valid block is found.
 */
function parseToolCallBlocks(
  content: string | null | undefined,
): ToolInvocation[] {
  if (!content) {
    throw new Error("Assistant reply did not contain a tool call.");
  }
  const regex = /<tool_call>\s*({[\s\S]*?})\s*<\/tool_call>/g;
  const calls: ToolInvocation[] = [];
  let match: RegExpExecArray | null;
  while ((match = regex.exec(content)) !== null) {
    let payload: unknown;
    try {
      payload = JSON.parse(match[1]);
    } catch {
      // Malformed JSON inside this block: skip it and keep scanning.
      continue;
    }
    if (typeof payload !== "object" || payload === null) {
      continue;
    }
    const { name, arguments: args } = payload as {
      name?: unknown;
      arguments?: unknown;
    };
    // Enforce the declared shape: string name, non-null object arguments.
    if (typeof name !== "string" || typeof args !== "object" || args === null) {
      continue;
    }
    calls.push({ name, arguments: args as Record<string, unknown> });
  }
  if (calls.length === 0) {
    throw new Error("Failed to find any <tool_call> blocks.");
  }
  return calls;
}

/**
 * Executes one of the stubbed demo tools and returns its JSON-able result.
 * Unknown tool names yield an `{ error: ... }` object instead of throwing.
 */
async function runTool(call: ToolInvocation): Promise<Record<string, unknown>> {
  switch (call.name) {
    case "get_weather": {
      const city = String(call.arguments.location ?? "").trim() || "unknown";
      const tempUnit = (call.arguments.unit as string) ?? "celsius";
      return {
        location: city,
        unit: tempUnit,
        // Canned readings — this demo has no real weather backend.
        temperature: tempUnit === "fahrenheit" ? 72.0 : 22.2,
        conditions: "Clear skies",
        source: "demo-weather-kit",
      };
    }
    case "get_time":
      return {
        timezone: (call.arguments.timezone as string) ?? "UTC",
        iso_time: new Date().toISOString(),
        note: "Demo tool uses local clock only.",
      };
    default:
      return { error: `Tool ${call.name} is not implemented in the demo.` };
  }
}

/**
 * End-to-end demo: load a model, force a structural-tag-constrained tool
 * call, execute the stubbed tools locally, then ask the model for a prose
 * summary of the results. All failures are caught and logged to the page
 * and console rather than rejecting.
 */
async function main() {
  try {
    appendLog("Loading model...");
    const selectedModel = "Llama-3.2-1B-Instruct-q4f16_1-MLC";
    const engine: webllm.MLCEngineInterface = await webllm.CreateMLCEngine(
      selectedModel,
      { initProgressCallback: initProgressCallback, logLevel: "INFO" },
    );

    // Advertise only tool names + descriptions in the prompt; the argument
    // schemas are enforced separately via the structural tag below.
    const systemPrompt =
      "You are a MCP assistant. " +
      'Use the provided tools and emit one or more <tool_call> blocks (one per tool you need) with a JSON body {"name": ..., "arguments": ...}. ' +
      "Do not add extra prose when calling a tool." +
      " Available tools: " +
      JSON.stringify(
        tools.map((tool) => ({
          name: tool.name,
          description: tool.description,
        })),
        null,
        2,
      );

    const messages: webllm.ChatCompletionMessageParam[] = [
      { role: "system", content: systemPrompt },
      {
        role: "user",
        content:
          "Give me the weather in Paris in celsius and also tell me the current time in UTC.",
      },
    ];

    const responseFormat: webllm.ResponseFormat = {
      type: "structural_tag",
      structural_tag: mcpStructuralTag,
    };

    // Round 1: constrained request — the structural tag guarantees any tool
    // call is emitted as a well-formed <tool_call> block.
    appendLog("Requesting constrained tool call...");
    const toolCallReply = await engine.chat.completions.create({
      stream: false,
      messages,
      max_tokens: 1024,
      response_format: responseFormat,
    });

    const toolCallContent = toolCallReply.choices[0].message.content ?? "";
    appendLog(`Assistant tool call:\n${toolCallContent}`);
    const parsedCalls = parseToolCallBlocks(toolCallContent);
    // Assign each call a stable id so tool results can be matched back to it.
    const toolCalls = parsedCalls.map((call, idx) => {
      const toolCallId = `${call.name}-call-${idx + 1}`;
      return { id: toolCallId, call };
    });
    // Record the assistant turn in OpenAI-style tool_calls form so the
    // follow-up `tool` messages can reference a matching tool_call_id.
    messages.push({
      role: "assistant",
      content: toolCallContent,
      tool_calls: toolCalls.map(({ id, call }) => ({
        id,
        type: "function",
        function: {
          name: call.name,
          arguments: JSON.stringify(call.arguments),
        },
      })),
    } as webllm.ChatCompletionMessageParam);

    // Run each stubbed tool and feed its JSON result back as a `tool` message.
    for (const { id, call } of toolCalls) {
      const toolResult = await runTool(call);
      messages.push({
        role: "tool",
        tool_call_id: id,
        content: JSON.stringify(toolResult),
      });
      appendLog(
        `Tool response for ${call.name}:\n${JSON.stringify(toolResult, null, 2)}`,
      );
    }

    messages.push({
      role: "user",
      content:
        "You have been given one or more tool responses above. Summarize ALL tool results in a single reply. Include both the weather details and the time information. Do not make up any values.",
    });

    // Round 2: unconstrained request so the model can answer in plain prose.
    appendLog("Requesting final assistant message...");
    const finalReply = await engine.chat.completions.create({
      stream: false,
      messages,
      max_tokens: 256,
    });
    const finalContent = finalReply.choices[0].message.content ?? "";
    appendLog(`Final assistant message:\n${finalContent}`);
  } catch (err) {
    const message = err instanceof Error ? err.message : String(err);
    appendLog(`Error: ${message}`);
    console.error(err);
  }
}

// Fire-and-forget entry point; all errors are handled inside main().
void main();
Loading