
Commit da193f1

MLX library: update to python snippets (huggingface#1445)
1 parent 0c063f5 commit da193f1

File tree

1 file changed: +48 -12 lines changed

packages/tasks/src/model-libraries-snippets.ts

Lines changed: 48 additions & 12 deletions
@@ -1353,30 +1353,66 @@ model = SwarmFormerModel.from_pretrained("${model.id}")
 ];
 
 const mlx_unknown = (model: ModelData): string[] => [
-	`pip install huggingface_hub hf_transfer
+	`# Download the model from the Hub
+pip install huggingface_hub hf_transfer
 
 export HF_HUB_ENABLE_HF_TRANSFER=1
 huggingface-cli download --local-dir ${nameWithoutNamespace(model.id)} ${model.id}`,
 ];
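
For illustration, the updated mlx_unknown snippet renders as shell commands once `${model.id}` is filled in. With a hypothetical id of mlx-community/Mistral-7B-Instruct-v0.3-4bit (nameWithoutNamespace drops the namespace when naming the download directory), the generated snippet would read:

# Download the model from the Hub
pip install huggingface_hub hf_transfer

export HF_HUB_ENABLE_HF_TRANSFER=1
huggingface-cli download --local-dir Mistral-7B-Instruct-v0.3-4bit mlx-community/Mistral-7B-Instruct-v0.3-4bit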
 
 const mlxlm = (model: ModelData): string[] => [
-	`pip install --upgrade mlx-lm
+	`# Make sure mlx-lm is installed
+pip install --upgrade mlx-lm
 
-mlx_lm.generate --model ${model.id} --prompt "Hello"`,
+# Generate text with mlx-lm
+from mlx_lm import load, generate
+
+model, tokenizer = load("${model.id}")
+
+prompt = "Once upon a time in"
+text = generate(model, tokenizer, prompt=prompt, verbose=True)`,
 ];
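
The new mlxlm snippet maps directly onto the documented mlx-lm Python API. A minimal sketch of a variation, assuming the upstream generate signature and a hypothetical model id, that also caps output length with the max_tokens keyword:

# Hypothetical model id; max_tokens limits how many tokens are generated
from mlx_lm import load, generate

model, tokenizer = load("mlx-community/Mistral-7B-Instruct-v0.3-4bit")
text = generate(model, tokenizer, prompt="Once upon a time in", max_tokens=100, verbose=True)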
 
 const mlxchat = (model: ModelData): string[] => [
-	`pip install --upgrade mlx-lm
+	`# Make sure mlx-lm is installed
+pip install --upgrade mlx-lm
+
+# Generate text with mlx-lm
+from mlx_lm import load, generate
 
-mlx_lm.chat --model ${model.id}`,
+model, tokenizer = load("${model.id}")
+
+prompt = "Write a story about Einstein"
+messages = [{"role": "user", "content": prompt}]
+prompt = tokenizer.apply_chat_template(
+    messages, add_generation_prompt=True
+)
+
+text = generate(model, tokenizer, prompt=prompt, verbose=True)`,
 ];
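
The chat snippet extends naturally to multi-turn conversations: apply_chat_template accepts the full message history, and with its default tokenize=True it returns token ids that mlx-lm's generate accepts directly. A sketch under those assumptions, with an invented conversation:

# Multi-turn history; roles follow the standard chat-template convention
messages = [
    {"role": "user", "content": "Write a story about Einstein"},
    {"role": "assistant", "content": "Once upon a time, a curious patent clerk..."},
    {"role": "user", "content": "Now retell it in two sentences."},
]
prompt = tokenizer.apply_chat_template(messages, add_generation_prompt=True)
text = generate(model, tokenizer, prompt=prompt, verbose=True)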
 
 const mlxvlm = (model: ModelData): string[] => [
-	`pip install --upgrade mlx-vlm
+	`# Make sure mlx-vlm is installed
+from mlx_vlm import load, generate
+from mlx_vlm.prompt_utils import apply_chat_template
+from mlx_vlm.utils import load_config
+
+# Load the model
+model, processor = load("${model.id}")
+config = load_config("${model.id}")
+
+# Prepare input
+image = ["http://images.cocodataset.org/val2017/000000039769.jpg"]
+prompt = "Describe this image."
+
+# Apply chat template
+formatted_prompt = apply_chat_template(
+    processor, config, prompt, num_images=1
+)
 
-mlx_vlm.generate --model ${model.id} \\
-  --prompt "Describe this image." \\
-  --image "https://huggingface.co/datasets/huggingface/documentation-images/resolve/0052a70beed5bf71b92610a43a52df6d286cd5f3/diffusers/rabbit.jpg"`,
+# Generate output
+output = generate(model, processor, formatted_prompt, image)
+print(output)`,
 ];
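
A hedged usage note on the mlx-vlm snippet: the image argument is a list, and local file paths are expected to work in place of the COCO URL shown above (the path below is a placeholder, not from the commit):

# Assumption: a local path is accepted wherever an image URL is
image = ["./my_photo.jpg"]
output = generate(model, processor, formatted_prompt, image)
print(output)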
 
 export const mlxim = (model: ModelData): string[] => [
@@ -1386,11 +1422,11 @@ model = create_model(${model.id})`,
 ];
 
 export const mlx = (model: ModelData): string[] => {
-	if (model.tags.includes("image-text-to-text")) {
+	if (model.pipeline_tag === "image-text-to-text") {
 		return mlxvlm(model);
 	}
-	if (model.tags.includes("conversational")) {
-		if (model.config?.tokenizer_config?.chat_template) {
+	if (model.pipeline_tag === "text-generation") {
+		if (model.tags.includes("conversational")) {
 			return mlxchat(model);
 		} else {
 			return mlxlm(model);
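In effect, the rewritten dispatch keys off pipeline_tag first and only then the conversational tag: an image-text-to-text model gets the mlx-vlm snippet, a text-generation model tagged conversational gets the chat-template snippet, and a text-generation model without that tag gets the plain completion snippet. The hunk is truncated here, so the remaining fallback path (presumably mlx_unknown) is not shown.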