16 changes: 8 additions & 8 deletions packages/tasks/src/snippets/js.spec.ts
@@ -12,9 +12,9 @@ describe("inference API snippets", () => {
 };
 const snippet = getJsInferenceSnippet(model, "api_token") as InferenceSnippet[];

-expect(snippet[0].content).toEqual(`import { HfInference } from "@huggingface/inference"
+expect(snippet[0].content).toEqual(`import { HfInference } from "@huggingface/inference";

-const client = new HfInference("api_token")
+const client = new HfInference("api_token");

 let out = "";

@@ -47,9 +47,9 @@ for await (const chunk of stream) {
 };
 const snippet = getJsInferenceSnippet(model, "api_token", { streaming: false }) as InferenceSnippet[];

-expect(snippet[0].content).toEqual(`import { HfInference } from "@huggingface/inference"
+expect(snippet[0].content).toEqual(`import { HfInference } from "@huggingface/inference";

-const client = new HfInference("api_token")
+const client = new HfInference("api_token");

 const chatCompletion = await client.chatCompletion({
 model: "meta-llama/Llama-3.1-8B-Instruct",
@@ -74,9 +74,9 @@ console.log(chatCompletion.choices[0].message);`);
 };
 const snippet = getJsInferenceSnippet(model, "api_token") as InferenceSnippet[];

-expect(snippet[0].content).toEqual(`import { HfInference } from "@huggingface/inference"
+expect(snippet[0].content).toEqual(`import { HfInference } from "@huggingface/inference";

-const client = new HfInference("api_token")
+const client = new HfInference("api_token");

 let out = "";

@@ -120,9 +120,9 @@ for await (const chunk of stream) {
 };
 const snippet = getJsInferenceSnippet(model, "api_token") as InferenceSnippet[];

-expect(snippet[0].content).toEqual(`import { HfInference } from "@huggingface/inference"
+expect(snippet[0].content).toEqual(`import { HfInference } from "@huggingface/inference";

-const client = new HfInference("api_token")
+const client = new HfInference("api_token");

 let out = "";

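For reference, the full non-streaming snippet the updated spec expects would read roughly as follows once assembled; the messages array and max_tokens value are truncated in the diff above, so they are filled in here as assumptions:

import { HfInference } from "@huggingface/inference";

const client = new HfInference("api_token");

const chatCompletion = await client.chatCompletion({
    model: "meta-llama/Llama-3.1-8B-Instruct",
    messages: [
        // assumed prompt; the actual expected payload is cut off in the diff
        { role: "user", content: "What is the capital of France?" },
    ],
    max_tokens: 500, // assumed value, not visible in the diff
});

console.log(chatCompletion.choices[0].message);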
16 changes: 8 additions & 8 deletions packages/tasks/src/snippets/js.ts
@@ -58,9 +58,9 @@ export const snippetTextGeneration = (
 return [
 {
 client: "huggingface.js",
-content: `import { HfInference } from "@huggingface/inference"
+content: `import { HfInference } from "@huggingface/inference";

-const client = new HfInference("${accessToken || `{API_TOKEN}`}")
+const client = new HfInference("${accessToken || `{API_TOKEN}`}");

 let out = "";

@@ -80,12 +80,12 @@ for await (const chunk of stream) {
 },
 {
 client: "openai",
-content: `import { OpenAI } from "openai"
+content: `import { OpenAI } from "openai";

 const client = new OpenAI({
 baseURL: "https://api-inference.huggingface.co/v1/",
 apiKey: "${accessToken || `{API_TOKEN}`}"
-})
+});

 let out = "";

@@ -109,9 +109,9 @@ for await (const chunk of stream) {
 return [
 {
 client: "huggingface.js",
-content: `import { HfInference } from "@huggingface/inference"
+content: `import { HfInference } from "@huggingface/inference";

-const client = new HfInference("${accessToken || `{API_TOKEN}`}")
+const client = new HfInference("${accessToken || `{API_TOKEN}`}");

 const chatCompletion = await client.chatCompletion({
 model: "${model.id}",
@@ -123,12 +123,12 @@ console.log(chatCompletion.choices[0].message);`,
 },
 {
 client: "openai",
-content: `import { OpenAI } from "openai"
+content: `import { OpenAI } from "openai";

 const client = new OpenAI({
 baseURL: "https://api-inference.huggingface.co/v1/",
 apiKey: "${accessToken || `{API_TOKEN}`}"
-})
+});

 const chatCompletion = await client.chat.completions.create({
 model: "${model.id}",
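For comparison, the non-streaming snippet generated for the openai client would render to something like the following once the template placeholders are filled in; the messages array, max_tokens, and the final console.log are not visible in this diff, so they are reconstructed here as assumptions, and "hf_xxx" stands in for a real access token:

import { OpenAI } from "openai";

const client = new OpenAI({
    baseURL: "https://api-inference.huggingface.co/v1/",
    apiKey: "hf_xxx" // ${accessToken} in the template, or {API_TOKEN} when none is passed
});

const chatCompletion = await client.chat.completions.create({
    model: "meta-llama/Llama-3.1-8B-Instruct", // ${model.id} in the template
    messages: [
        // assumed prompt; not visible in the diff
        { role: "user", content: "What is the capital of France?" },
    ],
    max_tokens: 500, // assumed value, not visible in the diff
});

console.log(chatCompletion.choices[0].message);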