@@ -267,6 +267,32 @@ const snippetTgi = (model: ModelData): LocalAppSnippet[] => {
267267 ] ;
268268} ;
269269
270+ const snippetLlamafile = ( model : ModelData , filepath ?: string ) : LocalAppSnippet [ ] => {
271+ const modelFile = filepath ?? "{{GGUF_FILE}}" ;
272+ const command = ( binary : string ) => {
273+ const snippet = [ "# Load and run the model:" , `${ binary } -m ${ modelFile } ` ] ;
274+ if ( ! model . tags . includes ( "conversational" ) ) {
275+ snippet . push ( '-p "Once upon a time,"' ) ;
276+ }
277+ return snippet . join ( "\n" ) ;
278+ } ;
279+
280+ return [
281+ {
282+ title : "Use pre-built binary" ,
283+ setup : [
284+ "# Download llamafile binary and rename it to llamafile.exe from:" ,
285+ "# https://github.com/Mozilla-Ocho/llamafile/releases/latest" ,
286+ "# Make the binary executable:" ,
287+ "chmod +x ./llamafile.exe" ,
288+ "# Download the model:" ,
289+ `wget https://huggingface.co/${ model . id } /resolve/main/${ modelFile } ` ,
290+ ] . join ( "\n" ) ,
291+ content : command ( "./llamafile.exe" ) ,
292+ } ,
293+ ] ;
294+ } ;
295+
270296/**
271297 * Add your new local app here.
272298 *
@@ -430,6 +456,13 @@ export const LOCAL_APPS = {
430456 displayOnModelPage : isLlamaCppGgufModel ,
431457 snippet : snippetOllama ,
432458 } ,
	// Local-app entry for llamafile (https://github.com/Mozilla-Ocho/llamafile).
	// Gated on the same GGUF check (isLlamaCppGgufModel) as the other
	// llama.cpp-based runners in this table, e.g. the ollama entry above.
	llamafile: {
		prettyLabel: "llamafile",
		docsUrl: "https://github.com/Mozilla-Ocho/llamafile",
		mainTask: "text-generation",
		displayOnModelPage: isLlamaCppGgufModel,
		snippet: snippetLlamafile,
	},
433466} satisfies Record < string , LocalApp > ;
434467
435468export type LocalAppKey = keyof typeof LOCAL_APPS ;
0 commit comments