
Commit f584263

committed
test: first function call prefix detected and used
1 parent 9632244 commit f584263

1 file changed: +117 −0 lines changed

Lines changed: 117 additions & 0 deletions
@@ -0,0 +1,117 @@
import {describe, expect, test} from "vitest";
import {
    defineChatSessionFunction, JinjaTemplateChatWrapper, LlamaChatSession, QwenChatWrapper, resolveChatWrapper
} from "../../../src/index.js";
import {getModelFile} from "../../utils/modelFiles.js";
import {getTestLlama} from "../../utils/getTestLlama.js";

describe("qwen3 0.6b", () => {
    describe("functions", () => {
        test("get n-th word", {timeout: 1000 * 60 * 60 * 2}, async () => {
            const modelPath = await getModelFile("Qwen3-0.6B-Q8_0.gguf");
            const llama = await getTestLlama();

            const model = await llama.loadModel({
                modelPath
            });
            const context = await model.createContext({
                contextSize: 1024
            });
            const chatSession = new LlamaChatSession({
                contextSequence: context.getSequence()
            });
            expect(chatSession.chatWrapper).to.be.instanceof(QwenChatWrapper);

            const promptOptions: Parameters<typeof chatSession.prompt>[1] = {
                functions: {
                    getNthWord: defineChatSessionFunction({
                        description: "Get an n-th word",
                        params: {
                            type: "object",
                            properties: {
                                n: {
                                    enum: [1, 2, 3, 4]
                                }
                            }
                        },
                        handler(params) {
                            return ["very", "secret", "this", "hello"][params.n - 1];
                        }
                    })
                }
            } as const;

            const res = await chatSession.prompt("What is the second word?", {
                ...promptOptions,
                maxTokens: 200
            });

            expect(res.trim()).to.be.eq('The second word is "secret".');

            const res2 = await chatSession.prompt("Explain what this word means", {
                ...promptOptions,
                maxTokens: 40,
                budgets: {
                    thoughtTokens: 0
                }
            });

            expect(res2.length).to.be.greaterThan(1);
        });

        test("get n-th word using jinja template", {timeout: 1000 * 60 * 60 * 2}, async () => {
            const modelPath = await getModelFile("Qwen3-0.6B-Q8_0.gguf");
            const llama = await getTestLlama();

            const model = await llama.loadModel({
                modelPath
            });
            const context = await model.createContext({
                contextSize: 1024
            });
            const chatSession = new LlamaChatSession({
                contextSequence: context.getSequence(),
                chatWrapper: resolveChatWrapper(model, {
                    type: "jinjaTemplate"
                })
            });
            expect(chatSession.chatWrapper).to.be.instanceof(JinjaTemplateChatWrapper);

            const promptOptions: Parameters<typeof chatSession.prompt>[1] = {
                functions: {
                    getNthWord: defineChatSessionFunction({
                        description: "Get an n-th word",
                        params: {
                            type: "object",
                            properties: {
                                n: {
                                    enum: [1, 2, 3, 4]
                                }
                            }
                        },
                        handler(params) {
                            return ["very", "secret", "this", "hello"][params.n - 1];
                        }
                    })
                }
            } as const;

            const res = await chatSession.prompt("What is the second word?", {
                ...promptOptions,
                maxTokens: 205
            });

            expect(res.trim()).to.be.eq('The second word is **"secret"**.');

            const res2 = await chatSession.prompt("Explain what this word means", {
                ...promptOptions,
                maxTokens: 40,
                budgets: {
                    thoughtTokens: 0
                }
            });

            expect(res2.length).to.be.greaterThan(1);
        });
    });
});
