@@ -10,7 +10,6 @@ import {
   BertTokenizer,
   T5Tokenizer,
   WhisperTokenizer,
-  BartTokenizer,
   MarianTokenizer,
   PreTrainedTokenizer,
   AutoTokenizer,
@@ -29,6 +28,7 @@ import {
   CohereForCausalLM,
   GemmaForCausalLM,
   Gemma2ForCausalLM,
+  GlmForCausalLM,
   OPTForCausalLM,
   GPTNeoXForCausalLM,
   GPTJForCausalLM,
@@ -1366,7 +1366,7 @@ describe("Tiny random models", () => {
     });
   });

-  describe("gemma", () => {
+  describe("gemma2", () => {
     describe("Gemma2ForCausalLM", () => {
       const model_id = "hf-internal-testing/tiny-random-Gemma2ForCausalLM";
       /** @type {Gemma2ForCausalLM} */
@@ -1417,6 +1417,57 @@ describe("Tiny random models", () => {
     });
   });

+  describe("glm", () => {
+    describe("GlmForCausalLM", () => {
+      const model_id = "hf-internal-testing/tiny-random-GlmForCausalLM";
+      /** @type {GlmForCausalLM} */
+      let model;
+      /** @type {PreTrainedTokenizer} */
+      let tokenizer;
+      beforeAll(async () => {
+        model = await GlmForCausalLM.from_pretrained(model_id, {
+          // TODO move to config
+          ...DEFAULT_MODEL_OPTIONS,
+        });
+        tokenizer = await PreTrainedTokenizer.from_pretrained(model_id);
+        // tokenizer.padding_side = "left";
+      }, MAX_MODEL_LOAD_TIME);
+
+      it(
+        "batch_size=1",
+        async () => {
+          const inputs = tokenizer("hello");
+          const outputs = await model.generate({
+            ...inputs,
+            max_length: 10,
+          });
+          expect(outputs.tolist()).toEqual([[23582n, 5797n, 38238n, 24486n, 36539n, 34489n, 6948n, 34489n, 6948n, 34489n]]);
+        },
+        MAX_TEST_EXECUTION_TIME,
+      );
+
+      it(
+        "batch_size>1",
+        async () => {
+          const inputs = tokenizer(["hello", "hello world"], { padding: true });
+          const outputs = await model.generate({
+            ...inputs,
+            max_length: 10,
+          });
+          expect(outputs.tolist()).toEqual([
+            [59246n, 23582n, 5797n, 38238n, 24486n, 36539n, 34489n, 6948n, 34489n, 6948n],
+            [23582n, 2901n, 39936n, 25036n, 55411n, 10337n, 3424n, 39183n, 30430n, 37285n]
+          ]);
+        },
+        MAX_TEST_EXECUTION_TIME,
+      );
+
+      afterAll(async () => {
+        await model?.dispose();
+      }, MAX_MODEL_DISPOSE_TIME);
+    });
+  });
+
   describe("gpt_neo", () => {
     describe("GPTNeoForCausalLM", () => {
       const model_id = "hf-internal-testing/tiny-random-GPTNeoForCausalLM";
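For reference, a minimal standalone sketch of the generation flow the new GlmForCausalLM tests exercise, assuming the `@huggingface/transformers` package exposes the same exports as the import hunk above; the logged token ids are whatever the checkpoint produces, not the asserted values, and the test suite's DEFAULT_MODEL_OPTIONS helper is omitted:

import { GlmForCausalLM, PreTrainedTokenizer } from "@huggingface/transformers";

// Same tiny random checkpoint the test suite loads.
const model_id = "hf-internal-testing/tiny-random-GlmForCausalLM";
const model = await GlmForCausalLM.from_pretrained(model_id);
const tokenizer = await PreTrainedTokenizer.from_pretrained(model_id);

// Batched prompts need padding so the input tensors stay rectangular,
// mirroring the batch_size>1 test case above.
const inputs = tokenizer(["hello", "hello world"], { padding: true });
const outputs = await model.generate({ ...inputs, max_length: 10 });
console.log(outputs.tolist()); // arrays of BigInt token ids, one row per prompt

// Release the ONNX session, as the suite does in afterAll.
await model.dispose();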