@@ -119,12 +119,12 @@ const snippetLlamacpp = (model: ModelData, filepath?: string): LocalAppSnippet[]
 		{
 			title: "Install from brew",
 			setup: "brew install llama.cpp",
-			content: command("llama-cli"),
+			content: command("llama-server"),
 		},
 		{
 			title: "Install from WinGet (Windows)",
 			setup: "winget install llama.cpp",
-			content: command("llama-cli"),
+			content: command("llama-server"),
 		},
 		{
 			title: "Use pre-built binary",
@@ -133,17 +133,17 @@ const snippetLlamacpp = (model: ModelData, filepath?: string): LocalAppSnippet[]
 				"# Download pre-built binary from:",
 				"# https://github.com/ggerganov/llama.cpp/releases",
 			].join("\n"),
-			content: command("./llama-cli"),
+			content: command("./llama-server"),
 		},
 		{
 			title: "Build from source code",
 			setup: [
 				"git clone https://github.com/ggerganov/llama.cpp.git",
 				"cd llama.cpp",
 				"cmake -B build -DLLAMA_CURL=ON",
-				"cmake --build build -j --target llama-cli",
+				"cmake --build build -j --target llama-server",
 			].join("\n"),
-			content: command("./build/bin/llama-cli"),
+			content: command("./build/bin/llama-server"),
 		},
 	];
};
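For context, the `command` helper these entries call is defined outside the visible hunks. Below is a minimal sketch of what such a builder could look like, assuming it joins a comment line with the binary invocation and uses llama.cpp's `-hf` Hugging Face shorthand; `makeCommand` is a hypothetical name and the helper's actual body in local-apps.ts may differ.

	// Hypothetical sketch, not the repo's actual helper: builds the snippet
	// text from a model id and an optional GGUF file path, then a binary name.
	const makeCommand =
		(modelId: string, filepath?: string) =>
		(binary: string): string =>
			[
				"# Load and run the model:",
				// `-hf user/repo[:file]` is llama.cpp's Hugging Face shorthand;
				// appending `filepath` this way is an assumption.
				`${binary} -hf ${modelId}${filepath ? `:${filepath}` : ""}`,
			].join("\n");

	// Example:
	//   makeCommand("user/model-GGUF")("llama-server")
	//   => "# Load and run the model:\nllama-server -hf user/model-GGUF"

Unlike `llama-cli`, which runs a one-off session in the terminal, `llama-server` serves the model over an OpenAI-compatible HTTP API (on port 8080 by default), which is why the snippet commands, the cmake build target, and the binary paths all switch to it.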