Commit 58fd223

Add unit tests

1 parent 66c54e6 · commit 58fd223

File tree

2 files changed: +102 −0 lines changed

File 1 of 2 — Lines changed: 51 additions & 0 deletions
@@ -0,0 +1,51 @@
import { PreTrainedTokenizer, GlmForCausalLM } from "../../../src/transformers.js";

import { MAX_MODEL_LOAD_TIME, MAX_TEST_EXECUTION_TIME, MAX_MODEL_DISPOSE_TIME, DEFAULT_MODEL_OPTIONS } from "../../init.js";

export default () => {
  describe("GlmForCausalLM", () => {
    const model_id = "hf-internal-testing/tiny-random-GlmForCausalLM";
    /** @type {GlmForCausalLM} */
    let model;
    /** @type {PreTrainedTokenizer} */
    let tokenizer;
    beforeAll(async () => {
      model = await GlmForCausalLM.from_pretrained(model_id, DEFAULT_MODEL_OPTIONS);
      tokenizer = await PreTrainedTokenizer.from_pretrained(model_id);
      tokenizer.padding_side = "left";
    }, MAX_MODEL_LOAD_TIME);

    it(
      "batch_size=1",
      async () => {
        const inputs = tokenizer("hello");
        const outputs = await model.generate({
          ...inputs,
          max_length: 10,
        });
        expect(outputs.tolist()).toEqual([[23582n, 5797n, 38238n, 24486n, 36539n, 34489n, 6948n, 34489n, 6948n, 16014n]]);
      },
      MAX_TEST_EXECUTION_TIME,
    );

    it(
      "batch_size>1",
      async () => {
        const inputs = tokenizer(["hello", "hello world"], { padding: true });
        const outputs = await model.generate({
          ...inputs,
          max_length: 10,
        });
        expect(outputs.tolist()).toEqual([
          [59246n, 23582n, 5797n, 38238n, 24486n, 36539n, 34489n, 6948n, 34489n, 6948n],
          [23582n, 2901n, 39936n, 25036n, 55411n, 10337n, 3424n, 39183n, 30430n, 37285n],
        ]);
      },
      MAX_TEST_EXECUTION_TIME,
    );

    afterAll(async () => {
      await model?.dispose();
    }, MAX_MODEL_DISPOSE_TIME);
  });
};
File 2 of 2 — Lines changed: 51 additions & 0 deletions
@@ -0,0 +1,51 @@
import { PreTrainedTokenizer, HeliumForCausalLM } from "../../../src/transformers.js";

import { MAX_MODEL_LOAD_TIME, MAX_TEST_EXECUTION_TIME, MAX_MODEL_DISPOSE_TIME, DEFAULT_MODEL_OPTIONS } from "../../init.js";

export default () => {
  describe("HeliumForCausalLM", () => {
    const model_id = "hf-internal-testing/tiny-random-HeliumForCausalLM";
    /** @type {HeliumForCausalLM} */
    let model;
    /** @type {PreTrainedTokenizer} */
    let tokenizer;
    beforeAll(async () => {
      model = await HeliumForCausalLM.from_pretrained(model_id, DEFAULT_MODEL_OPTIONS);
      tokenizer = await PreTrainedTokenizer.from_pretrained(model_id);
      tokenizer.padding_side = "left";
    }, MAX_MODEL_LOAD_TIME);

    it(
      "batch_size=1",
      async () => {
        const inputs = tokenizer("hello");
        const outputs = await model.generate({
          ...inputs,
          max_length: 10,
        });
        expect(outputs.tolist()).toEqual([[1n, 456n, 5660n, 1700n, 1486n, 37744n, 35669n, 39396n, 12024n, 32253n]]);
      },
      MAX_TEST_EXECUTION_TIME,
    );

    it(
      "batch_size>1",
      async () => {
        const inputs = tokenizer(["hello", "hello world"], { padding: true });
        const outputs = await model.generate({
          ...inputs,
          max_length: 10,
        });
        expect(outputs.tolist()).toEqual([
          [3n, 1n, 456n, 5660n, 1700n, 1486n, 37744n, 35669n, 39396n, 12024n],
          [1n, 456n, 5660n, 998n, 6136n, 2080n, 172n, 8843n, 40579n, 23953n],
        ]);
      },
      MAX_TEST_EXECUTION_TIME,
    );

    afterAll(async () => {
      await model?.dispose();
    }, MAX_MODEL_DISPOSE_TIME);
  });
};
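
Note: both test files export a default function rather than registering their describe() blocks at import time. A minimal sketch of how such exports could be collected by a shared runner follows; the aggregator file name and the import paths are assumptions for illustration, not part of this commit.

// Hypothetical aggregator (file name and import paths assumed).
import glm_tests from "./models/glm/test_modeling_glm.js"; // path assumed
import helium_tests from "./models/helium/test_modeling_helium.js"; // path assumed

describe("Model-specific generation tests", () => {
  // Invoking each default export registers its describe() block with the test runner.
  glm_tests();
  helium_tests();
});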
