@@ -262,6 +262,41 @@ const snippetTgi = (model: ModelData): LocalAppSnippet[] => {
	];
};

+const snippetMlxLm = (model: ModelData): LocalAppSnippet[] => {
+	const openaiCurl = [
+		"# Calling the OpenAI-compatible server with curl",
+		`curl -X POST "http://localhost:8000/v1/chat/completions" \\`,
+		`  -H "Content-Type: application/json" \\`,
+		`  --data '{`,
+		`    "model": "${model.id}",`,
+		`    "messages": [`,
+		`      {"role": "user", "content": "Hello"}`,
+		`    ]`,
+		`  }'`,
+	];
+
+	return [
+		{
+			title: "Generate or start a chat session",
+			setup: ["# Install MLX LM", "uv tool install mlx-lm"].join("\n"),
+			content: [
+				...(model.tags.includes("conversational")
+					? ["# Interactive chat REPL", `mlx_lm.chat --model "${model.id}"`]
+					: ["# Generate some text", `mlx_lm.generate --model "${model.id}" --prompt "Once upon a time"`]),
+			].join("\n"),
+		},
+		...(model.tags.includes("conversational")
+			? [
+					{
+						title: "Run an OpenAI-compatible server",
+						setup: ["# Install MLX LM", "uv tool install mlx-lm"].join("\n"),
+						content: ["# Start the server", `mlx_lm.server --model "${model.id}"`, ...openaiCurl].join("\n"),
+					},
+			  ]
+			: []),
+	];
+};
+
/**
 * Add your new local app here.
 *
@@ -302,6 +337,13 @@ export const LOCAL_APPS = {
			(model.pipeline_tag === "text-generation" || model.pipeline_tag === "image-text-to-text"),
		snippet: snippetVllm,
	},
+	"mlx-lm": {
+		prettyLabel: "MLX LM",
+		docsUrl: "https://github.com/ml-explore/mlx-lm",
+		mainTask: "text-generation",
+		displayOnModelPage: (model) => model.pipeline_tag === "text-generation" && isMlxModel(model),
+		snippet: snippetMlxLm,
+	},
	tgi: {
		prettyLabel: "TGI",
		docsUrl: "https://huggingface.co/docs/text-generation-inference/",