@@ -127,6 +127,23 @@ const snippetLlamacpp = (model: ModelData, filepath?: string): LocalAppSnippet[]
 	];
 };
 
+const snippetNodeLlamaCppCli = (model: ModelData, filepath?: string): LocalAppSnippet[] => {
+	return [
+		{
+			title: "Chat with the model",
+			content: [
+				`npx -y node-llama-cpp chat \\`,
+				`  --model "hf:${model.id}/${filepath ?? "{{GGUF_FILE}}"}" \\`,
+				`  --prompt 'Hi there!'`,
+			].join("\n"),
+		},
+		{
+			title: "Estimate the model compatibility with your hardware",
+			content: `npx -y node-llama-cpp inspect estimate "hf:${model.id}/${filepath ?? "{{GGUF_FILE}}"}"`,
+		},
+	];
+};
+
 const snippetOllama = (model: ModelData, filepath?: string): string => {
 	if (filepath) {
 		const quantLabel = parseGGUFQuantLabel(filepath);
@@ -245,6 +262,13 @@ export const LOCAL_APPS = {
 		displayOnModelPage: isLlamaCppGgufModel,
 		snippet: snippetLlamacpp,
 	},
+	"node-llama-cpp": {
+		prettyLabel: "node-llama-cpp",
+		docsUrl: "https://node-llama-cpp.withcat.ai",
+		mainTask: "text-generation",
+		displayOnModelPage: isLlamaCppGgufModel,
+		snippet: snippetNodeLlamaCppCli,
+	},
 	vllm: {
 		prettyLabel: "vLLM",
 		docsUrl: "https://docs.vllm.ai",
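For illustration only (not part of the diff), here is a minimal sketch of what the new snippet function returns when rendered for a model page; the repo id and GGUF filename are hypothetical placeholders, and `ModelData` is narrowed to the one field the snippet actually reads.

// Minimal sketch, assuming a hypothetical GGUF repo and quantized file name.
const snippets = snippetNodeLlamaCppCli(
	{ id: "org/model-GGUF" } as ModelData, // hypothetical repo id
	"model.Q4_K_M.gguf" // hypothetical quantized file
);
// snippets[0].content renders to:
//   npx -y node-llama-cpp chat \
//     --model "hf:org/model-GGUF/model.Q4_K_M.gguf" \
//     --prompt 'Hi there!'
// snippets[1].content renders to:
//   npx -y node-llama-cpp inspect estimate "hf:org/model-GGUF/model.Q4_K_M.gguf"
// When no filepath is passed, the "{{GGUF_FILE}}" placeholder is kept in the command.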