@@ -1353,30 +1353,66 @@ model = SwarmFormerModel.from_pretrained("${model.id}")
];

const mlx_unknown = (model: ModelData): string[] => [
- `pip install huggingface_hub hf_transfer
+ `# Download the model from the Hub
+ pip install huggingface_hub hf_transfer

export HF_HUB_ENABLE_HF_TRANSFER=1
huggingface-cli download --local-dir ${nameWithoutNamespace(model.id)} ${model.id}`,
];

const mlxlm = (model: ModelData): string[] => [
- `pip install --upgrade mlx-lm
+ `# Make sure mlx-lm is installed
+ pip install --upgrade mlx-lm

- mlx_lm.generate --model ${model.id} --prompt "Hello"`,
+ # Generate text with mlx-lm
+ from mlx_lm import load, generate
+
+ model, tokenizer = load("${model.id}")
+
+ prompt = "Once upon a time in"
+ text = generate(model, tokenizer, prompt=prompt, verbose=True)`,
];

const mlxchat = (model: ModelData): string[] => [
- `pip install --upgrade mlx-lm
+ `# Make sure mlx-lm is installed
+ pip install --upgrade mlx-lm
+
+ # Generate text with mlx-lm
+ from mlx_lm import load, generate

- mlx_lm.chat --model ${model.id}`,
+ model, tokenizer = load("${model.id}")
+
+ prompt = "Write a story about Einstein"
+ messages = [{"role": "user", "content": prompt}]
+ prompt = tokenizer.apply_chat_template(
+     messages, add_generation_prompt=True
+ )
+
+ text = generate(model, tokenizer, prompt=prompt, verbose=True)`,
];

const mlxvlm = (model: ModelData): string[] => [
- `pip install --upgrade mlx-vlm
+ `# Make sure mlx-vlm is installed
+ from mlx_vlm import load, generate
+ from mlx_vlm.prompt_utils import apply_chat_template
+ from mlx_vlm.utils import load_config
+
+ # Load the model
+ model, processor = load("${model.id}")
+ config = load_config("${model.id}")
+
+ # Prepare input
+ image = ["http://images.cocodataset.org/val2017/000000039769.jpg"]
+ prompt = "Describe this image."
+
+ # Apply chat template
+ formatted_prompt = apply_chat_template(
+     processor, config, prompt, num_images=1
+ )

- mlx_vlm.generate --model ${model.id} \\
- --prompt "Describe this image." \\
- --image "https://huggingface.co/datasets/huggingface/documentation-images/resolve/0052a70beed5bf71b92610a43a52df6d286cd5f3/diffusers/rabbit.jpg"`,
+ # Generate output
+ output = generate(model, processor, formatted_prompt, image)
+ print(output)`,
];

export const mlxim = (model: ModelData): string[] => [
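
For reference, a minimal usage sketch of the new builders (not part of the diff; the model id is hypothetical and the input is cast loosely because only `id` is read here):

// Hypothetical model id, for illustration only
const example = { id: "mlx-community/Example-4bit" } as unknown as ModelData;
const [snippet] = mlxlm(example);
// `snippet` is the Python snippet above with ${model.id} interpolated, e.g.
//   model, tokenizer = load("mlx-community/Example-4bit")
console.log(snippet);
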
@@ -1386,11 +1422,11 @@ model = create_model(${model.id})`,
];

export const mlx = (model: ModelData): string[] => {
- if (model.tags.includes("image-text-to-text")) {
+ if (model.pipeline_tag === "image-text-to-text") {
      return mlxvlm(model);
  }
- if (model.tags.includes("conversational")) {
-     if (model.config?.tokenizer_config?.chat_template) {
+ if (model.pipeline_tag === "text-generation") {
+     if (model.tags.includes("conversational")) {
          return mlxchat(model);
      } else {
          return mlxlm(model);
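
A rough sketch of how the updated dispatcher routes models after this change (hypothetical ids, inputs cast loosely; not part of the diff):

// Vision-language models are now detected via pipeline_tag instead of tags
const vlm = { id: "mlx-community/Example-VLM", pipeline_tag: "image-text-to-text", tags: [] } as unknown as ModelData;
mlx(vlm); // -> mlxvlm snippet (mlx-vlm load / apply_chat_template / generate)

// Text-generation models split on the "conversational" tag
const chat = { id: "mlx-community/Example-Chat", pipeline_tag: "text-generation", tags: ["conversational"] } as unknown as ModelData;
mlx(chat); // -> mlxchat snippet (tokenizer.apply_chat_template + generate)

const base = { id: "mlx-community/Example-Base", pipeline_tag: "text-generation", tags: [] } as unknown as ModelData;
mlx(base); // -> mlxlm snippet (plain prompt + generate)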