From cc5b6451b42cac66bf3bb91199aad378e406f066 Mon Sep 17 00:00:00 2001
From: Mishig Davaadorj
Date: Fri, 15 Nov 2024 11:56:28 +0100
Subject: [PATCH] [Conversational Snippet] add missing semicolons

---
 packages/tasks/src/snippets/js.spec.ts | 16 ++++++++--------
 packages/tasks/src/snippets/js.ts      | 16 ++++++++--------
 2 files changed, 16 insertions(+), 16 deletions(-)

diff --git a/packages/tasks/src/snippets/js.spec.ts b/packages/tasks/src/snippets/js.spec.ts
index 644f9a548a..50be868aac 100644
--- a/packages/tasks/src/snippets/js.spec.ts
+++ b/packages/tasks/src/snippets/js.spec.ts
@@ -12,9 +12,9 @@ describe("inference API snippets", () => {
 		};
 
 		const snippet = getJsInferenceSnippet(model, "api_token") as InferenceSnippet[];
 
-		expect(snippet[0].content).toEqual(`import { HfInference } from "@huggingface/inference"
+		expect(snippet[0].content).toEqual(`import { HfInference } from "@huggingface/inference";
 
-const client = new HfInference("api_token")
+const client = new HfInference("api_token");
 
 let out = "";
 
@@ -47,9 +47,9 @@ for await (const chunk of stream) {
 		};
 
 		const snippet = getJsInferenceSnippet(model, "api_token", { streaming: false }) as InferenceSnippet[];
 
-		expect(snippet[0].content).toEqual(`import { HfInference } from "@huggingface/inference"
+		expect(snippet[0].content).toEqual(`import { HfInference } from "@huggingface/inference";
 
-const client = new HfInference("api_token")
+const client = new HfInference("api_token");
 
 const chatCompletion = await client.chatCompletion({
 	model: "meta-llama/Llama-3.1-8B-Instruct",
@@ -74,9 +74,9 @@ console.log(chatCompletion.choices[0].message);`);
 		};
 
 		const snippet = getJsInferenceSnippet(model, "api_token") as InferenceSnippet[];
 
-		expect(snippet[0].content).toEqual(`import { HfInference } from "@huggingface/inference"
+		expect(snippet[0].content).toEqual(`import { HfInference } from "@huggingface/inference";
 
-const client = new HfInference("api_token")
+const client = new HfInference("api_token");
 
 let out = "";
 
@@ -120,9 +120,9 @@ for await (const chunk of stream) {
 		};
 
 		const snippet = getJsInferenceSnippet(model, "api_token") as InferenceSnippet[];
 
-		expect(snippet[0].content).toEqual(`import { HfInference } from "@huggingface/inference"
+		expect(snippet[0].content).toEqual(`import { HfInference } from "@huggingface/inference";
 
-const client = new HfInference("api_token")
+const client = new HfInference("api_token");
 
 let out = "";
 
diff --git a/packages/tasks/src/snippets/js.ts b/packages/tasks/src/snippets/js.ts
index 2728bbf98a..e1124fa1e5 100644
--- a/packages/tasks/src/snippets/js.ts
+++ b/packages/tasks/src/snippets/js.ts
@@ -58,9 +58,9 @@ export const snippetTextGeneration = (
 	return [
 		{
 			client: "huggingface.js",
-			content: `import { HfInference } from "@huggingface/inference"
+			content: `import { HfInference } from "@huggingface/inference";
 
-const client = new HfInference("${accessToken || `{API_TOKEN}`}")
+const client = new HfInference("${accessToken || `{API_TOKEN}`}");
 
 let out = "";
 
@@ -80,12 +80,12 @@ for await (const chunk of stream) {
 		},
 		{
 			client: "openai",
-			content: `import { OpenAI } from "openai"
+			content: `import { OpenAI } from "openai";
 
 const client = new OpenAI({
 	baseURL: "https://api-inference.huggingface.co/v1/",
 	apiKey: "${accessToken || `{API_TOKEN}`}"
-})
+});
 
 let out = "";
 
@@ -109,9 +109,9 @@ for await (const chunk of stream) {
 	return [
 		{
 			client: "huggingface.js",
-			content: `import { HfInference } from "@huggingface/inference"
+			content: `import { HfInference } from "@huggingface/inference";
 
-const client = new HfInference("${accessToken || `{API_TOKEN}`}")
+const client = new HfInference("${accessToken || `{API_TOKEN}`}");
 
 const chatCompletion = await client.chatCompletion({
 	model: "${model.id}",
@@ -123,12 +123,12 @@ console.log(chatCompletion.choices[0].message);`,
 		},
 		{
 			client: "openai",
-			content: `import { OpenAI } from "openai"
+			content: `import { OpenAI } from "openai";
 
 const client = new OpenAI({
 	baseURL: "https://api-inference.huggingface.co/v1/",
 	apiKey: "${accessToken || `{API_TOKEN}`}"
-})
+});
 
 const chatCompletion = await client.chat.completions.create({
 	model: "${model.id}",