Merged
24 changes: 24 additions & 0 deletions packages/tasks/src/local-apps.ts
@@ -119,6 +119,23 @@ const snippetLlamacpp = (model: ModelData, filepath?: string): LocalAppSnippet[]
];
};

const snippetNodeLlamaCppCli = (model: ModelData, filepath?: string): LocalAppSnippet[] => {
return [
{
title: "Chat with the model",
content: [
`npx -y node-llama-cpp chat \\`,
` --model "hf:${model.id}/${filepath ?? "{{GGUF_FILE}}"}" \\`,
` --prompt 'Hi there!'`,
].join("\n"),
},
{
title: "Estimate the model compatibility with your hardware",
content: `npx -y node-llama-cpp inspect estimate "hf:${model.id}/${filepath ?? "{{GGUF_FILE}}"}"`,
},
];
};
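
For illustration, given a hypothetical repo my-org/my-model with a GGUF file model.Q4_K_M.gguf (placeholder names, not from this PR), the two snippets above would render as:

npx -y node-llama-cpp chat \
 --model "hf:my-org/my-model/model.Q4_K_M.gguf" \
 --prompt 'Hi there!'

npx -y node-llama-cpp inspect estimate "hf:my-org/my-model/model.Q4_K_M.gguf"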

const snippetLocalAI = (model: ModelData, filepath?: string): LocalAppSnippet[] => {
const command = (binary: string) =>
["# Load and run the model:", `${binary} huggingface://${model.id}/${filepath ?? "{{GGUF_FILE}}"}`].join("\n");
@@ -200,6 +217,13 @@ export const LOCAL_APPS = {
displayOnModelPage: isLlamaCppGgufModel,
snippet: snippetLlamacpp,
},
"node-llama-cpp-cli": {
prettyLabel: "node-llama-cpp (CLI)",
docsUrl: "https://node-llama-cpp.withcat.ai",
mainTask: "text-generation",
displayOnModelPage: isLlamaCppGgufModel,
snippet: snippetNodeLlamaCppCli,
},
vllm: {
prettyLabel: "vLLM",
docsUrl: "https://docs.vllm.ai",
16 changes: 16 additions & 0 deletions packages/tasks/src/model-libraries-snippets.ts
@@ -360,6 +360,22 @@ backbone = keras_nlp.models.Backbone.from_preset("hf://${model.id}")
`,
];

export const nodeLlamaCpp = (model: ModelData): string[] => [
`import {getLlama, LlamaChatSession, resolveModelFile} from "node-llama-cpp";

const llama = await getLlama();
const modelPath = await resolveModelFile("hf:${model.id}/{{GGUF_FILE}}");

const model = await llama.loadModel({modelPath});
const context = await model.createContext();
const chat = new LlamaChatSession({
contextSequence: context.getSequence()
});

const res = await chat.prompt("Where do llamas come from?");
console.log("res:", res);`,
];
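
As a usage note, the generated library snippet assumes node-llama-cpp is installed in the consuming project, e.g.:

npm install node-llama-cpp

The snippet resolves the hf: URI to a local file via resolveModelFile at runtime, so no separate manual download step should be needed.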

export const llama_cpp_python = (model: ModelData): string[] => [
`from llama_cpp import Llama

7 changes: 7 additions & 0 deletions packages/tasks/src/model-libraries.ts
@@ -448,6 +448,13 @@ export const MODEL_LIBRARIES_UI_ELEMENTS = {
filter: true,
countDownloads: `path_extension:"nemo" OR path:"model_config.yaml"`,
},
"node-llama-cpp": {
Member:
Are there any examples of where this is used? I don't see any tagged repos here: https://huggingface.co/models?other=node-llama-cpp
Note: whichever repo has the tag node-llama-cpp would have the snippet you defined.

Contributor:
We should only have node-llama-cpp as a local app option, not a library, for consistency with the rest.

Member:
Yes! Read this: #949 (review)

Member:
agree

Contributor Author (@giladgd, Oct 7, 2024):
You can use node-llama-cpp to run llama.cpp inside a Node.js project in a similar manner to how you can use llama-cpp-python to run llama.cpp inside a Python project, so I think it's useful to have a code snippet for this.
I see that llama-cpp-python appears on all llama.cpp-compatible GGUF model repos, regardless of whether the repo has a llama-cpp-python tag (for example, this repo).

Since node-llama-cpp also makes it possible to use it as a CLI to interact with models without installing anything, I think both cases are worth having an easy-to-use snippet.

Is there a way to make both accessible?

Member:
IMO let's first ship in local apps and we can always revisit later.

Contributor Author (@giladgd):
Alright. I removed the library code snippet.

prettyLabel: "node-llama-cpp",
repoName: "node-llama-cpp",
repoUrl: "https://github.com/withcatai/node-llama-cpp",
docsUrl: "https://node-llama-cpp.withcat.ai",
snippets: snippets.nodeLlamaCpp,
},
open_clip: {
prettyLabel: "OpenCLIP",
repoName: "OpenCLIP",