@@ -104,6 +104,32 @@ const snippetLlamacpp = (model: ModelData, filepath?: string): LocalAppSnippet[]
 	];
 };
 
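+// Generates the llamafile snippet for GGUF models: download the model file,
+// make the pre-built llamafile binary executable, then run the model with it.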
+const snippetLlamafileGGUF = (model: ModelData, filepath?: string): LocalAppSnippet[] => {
+	const command = (binary: string) =>
+		[
+			"# Load and run your model",
+			`wget https://huggingface.co/${model.id}/resolve/main/${filepath ?? "{{GGUF_FILE}}"}`,
+			`chmod +x ${binary}`,
+			`${binary} -m ${filepath ?? "{{GGUF_FILE}}"} -p 'You are a helpful assistant'`,
+		].join("\n");
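+	// "{{GGUF_FILE}}" is a placeholder; the model page substitutes the user-selected .gguf file when no filepath is given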
+	return [
+		{
+			title: "Use pre-built binary",
+			setup: [
+				// prettier-ignore
+				"# Download pre-built binary from:",
+				"# https://github.com/Mozilla-Ocho/llamafile/releases",
+				"# Example:",
+				"# wget https://github.com/Mozilla-Ocho/llamafile/releases/download/0.8.13/llamafile-0.8.13",
+			].join("\n"),
+			content: command("./llamafile-0.8.13"),
+		},
+	];
+};
+
 const snippetLocalAI = (model: ModelData, filepath?: string): LocalAppSnippet[] => {
 	const command = (binary: string) =>
 		["# Load and run the model:", `${binary} huggingface://${model.id}/${filepath ?? "{{GGUF_FILE}}"}`].join("\n");
@@ -146,6 +172,15 @@ export const LOCAL_APPS = {
 		displayOnModelPage: isLlamaCppGgufModel,
 		snippet: snippetLlamacpp,
 	},
+	// llamafile can run both .llamafile and .gguf files; only .gguf is handled for now
+	// TODO: update displayOnModelPage (and this snippet) to also cover .llamafile files
+	llamafile: {
+		prettyLabel: "llamafile",
+		docsUrl: "https://github.com/Mozilla-Ocho/llamafile",
+		mainTask: "text-generation",
+		displayOnModelPage: isLlamaCppGgufModel, // TODO: also match .llamafile
+		snippet: snippetLlamafileGGUF,
+	},
 	lmstudio: {
 		prettyLabel: "LM Studio",
 		docsUrl: "https://lmstudio.ai",