diff --git a/packages/inference/src/snippets/getInferenceSnippets.ts b/packages/inference/src/snippets/getInferenceSnippets.ts
index 1df552cee4..767d6cc40f 100644
--- a/packages/inference/src/snippets/getInferenceSnippets.ts
+++ b/packages/inference/src/snippets/getInferenceSnippets.ts
@@ -14,7 +14,10 @@ import { makeRequestOptionsFromResolvedModel } from "../lib/makeRequestOptions.j
 import type { InferenceProviderOrPolicy, InferenceTask, RequestArgs } from "../types.js";
 import { templates } from "./templates.exported.js";
 
-export type InferenceSnippetOptions = { streaming?: boolean; billTo?: string } & Record<string, unknown>;
+export type InferenceSnippetOptions = { streaming?: boolean; billTo?: string; accessToken?: string } & Record<
+	string,
+	unknown
+>;
 
 const PYTHON_CLIENTS = ["huggingface_hub", "fal_client", "requests", "openai"] as const;
 const JS_CLIENTS = ["fetch", "huggingface.js", "openai"] as const;
@@ -121,11 +124,12 @@ const HF_JS_METHODS: Partial> = {
 	translation: "translation",
 };
 
+const ACCESS_TOKEN_PLACEHOLDER = "<ACCESS_TOKEN>"; // Placeholder to replace with env variable in snippets
+
 // Snippet generators
 const snippetGenerator = (templateName: string, inputPreparationFn?: InputPreparationFn) => {
 	return (
 		model: ModelDataMinimal,
-		accessToken: string,
 		provider: InferenceProviderOrPolicy,
 		inferenceProviderMapping?: InferenceProviderModelMapping,
 		opts?: InferenceSnippetOptions
@@ -149,13 +153,15 @@ const snippetGenerator = (templateName: string, inputPreparationFn?: InputPrepar
 			console.error(`Failed to get provider helper for ${provider} (${task})`, e);
 			return [];
 		}
+		const accessTokenOrPlaceholder = opts?.accessToken ?? ACCESS_TOKEN_PLACEHOLDER;
+
 		/// Prepare inputs + make request
 		const inputs = inputPreparationFn ? 
inputPreparationFn(model, opts) : { inputs: getModelInputSnippet(model) };
 		const request = makeRequestOptionsFromResolvedModel(
 			providerModelId,
 			providerHelper,
 			{
-				accessToken,
+				accessToken: accessTokenOrPlaceholder,
 				provider,
 				...inputs,
 			} as RequestArgs,
@@ -180,7 +186,7 @@ const snippetGenerator = (templateName: string, inputPreparationFn?: InputPrepar
 
 		/// Prepare template injection data
 		const params: TemplateParams = {
-			accessToken,
+			accessToken: accessTokenOrPlaceholder,
 			authorizationHeader: (request.info.headers as Record<string, string>)?.Authorization,
 			baseUrl: removeSuffix(request.url, "/chat/completions"),
 			fullUrl: request.url,
@@ -248,6 +254,11 @@ const snippetGenerator = (templateName: string, inputPreparationFn?: InputPrepar
 				snippet = `${importSection}\n\n${snippet}`;
 			}
 
+			/// Replace access token placeholder
+			if (snippet.includes(ACCESS_TOKEN_PLACEHOLDER)) {
+				snippet = replaceAccessTokenPlaceholder(snippet, language, provider);
+			}
+
 			/// Snippet is ready!
 			return { language, client: client as string, content: snippet };
 		})
@@ -299,7 +310,6 @@ const snippets: Partial<
 		PipelineType,
 		(
 			model: ModelDataMinimal,
-			accessToken: string,
 			provider: InferenceProviderOrPolicy,
 			inferenceProviderMapping?: InferenceProviderModelMapping,
 			opts?: InferenceSnippetOptions
@@ -339,13 +349,12 @@ const snippets: Partial<
 
 export function getInferenceSnippets(
 	model: ModelDataMinimal,
-	accessToken: string,
 	provider: InferenceProviderOrPolicy,
 	inferenceProviderMapping?: InferenceProviderModelMapping,
 	opts?: Record<string, unknown>
 ): InferenceSnippet[] {
 	return model.pipeline_tag && model.pipeline_tag in snippets
-		? snippets[model.pipeline_tag]?.(model, accessToken, provider, inferenceProviderMapping, opts) ?? []
+		? snippets[model.pipeline_tag]?.(model, provider, inferenceProviderMapping, opts) ?? []
 		: [];
 }
 
@@ -420,3 +429,56 @@ function indentString(str: string): string {
 
 function removeSuffix(str: string, suffix: string) {
 	return str.endsWith(suffix) ? 
str.slice(0, -suffix.length) : str;
 }
+
+function replaceAccessTokenPlaceholder(
+	snippet: string,
+	language: InferenceSnippetLanguage,
+	provider: InferenceProviderOrPolicy
+): string {
+	// If "opts.accessToken" is not set, the snippets are generated with a placeholder.
+	// Once snippets are rendered, we replace the placeholder with code to fetch the access token from an environment variable.
+
+	// Determine if HF_TOKEN or specific provider token should be used
+	const accessTokenEnvVar =
+		!snippet.includes("https://") || // no URL provided => using a client => use $HF_TOKEN
+		snippet.includes("https://router.huggingface.co") || // explicit routed request => use $HF_TOKEN
+		provider == "hf-inference" // hf-inference provider => use $HF_TOKEN
+			? "HF_TOKEN"
+			: provider.toUpperCase().replace("-", "_") + "_API_KEY"; // e.g. "REPLICATE_API_KEY"
+
+	// Replace the placeholder with the env variable
+	if (language === "sh") {
+		snippet = snippet.replace(
+			`'Authorization: Bearer ${ACCESS_TOKEN_PLACEHOLDER}'`,
+			`"Authorization: Bearer $${accessTokenEnvVar}"` // e.g. "Authorization: Bearer $HF_TOKEN"
+		);
+	} else if (language === "python") {
+		snippet = "import os\n" + snippet;
+		snippet = snippet.replace(
+			`"${ACCESS_TOKEN_PLACEHOLDER}"`,
+			`os.environ["${accessTokenEnvVar}"]` // e.g. os.environ["HF_TOKEN"]
+		);
+		snippet = snippet.replace(
+			`"Bearer ${ACCESS_TOKEN_PLACEHOLDER}"`,
+			`f"Bearer {os.environ['${accessTokenEnvVar}']}"` // e.g. f"Bearer {os.environ['HF_TOKEN']}"
+		);
+		snippet = snippet.replace(
+			`"Key ${ACCESS_TOKEN_PLACEHOLDER}"`,
+			`f"Key {os.environ['${accessTokenEnvVar}']}"` // e.g. f"Key {os.environ['FAL_AI_API_KEY']}"
+		);
+	} else if (language === "js") {
+		snippet = snippet.replace(
+			`"${ACCESS_TOKEN_PLACEHOLDER}"`,
+			`process.env.${accessTokenEnvVar}` // e.g. 
process.env.HF_TOKEN + ); + snippet = snippet.replace( + `Authorization: "Bearer ${ACCESS_TOKEN_PLACEHOLDER}",`, + `Authorization: \`Bearer $\{process.env.${accessTokenEnvVar}}\`,` // e.g. Authorization: `Bearer ${process.env.HF_TOKEN}`, + ); + snippet = snippet.replace( + `Authorization: "Key ${ACCESS_TOKEN_PLACEHOLDER}",`, + `Authorization: \`Key $\{process.env.${accessTokenEnvVar}}\`,` // e.g. Authorization: `Key ${process.env.FAL_AI_API_KEY}`, + ); + } + return snippet; +} diff --git a/packages/tasks-gen/scripts/generate-snippets-fixtures.ts b/packages/tasks-gen/scripts/generate-snippets-fixtures.ts index 34de742431..c18e2fdd00 100644 --- a/packages/tasks-gen/scripts/generate-snippets-fixtures.ts +++ b/packages/tasks-gen/scripts/generate-snippets-fixtures.ts @@ -240,6 +240,18 @@ const TEST_CASES: { providers: ["hf-inference"], opts: { billTo: "huggingface" }, }, + { + testName: "with-access-token", + task: "conversational", + model: { + id: "meta-llama/Llama-3.1-8B-Instruct", + pipeline_tag: "text-generation", + tags: ["conversational"], + inference: "", + }, + providers: ["hf-inference"], + opts: { accessToken: "hf_xxx" }, + }, { testName: "text-to-speech", task: "text-to-speech", @@ -314,7 +326,6 @@ function generateInferenceSnippet( ): InferenceSnippet[] { const allSnippets = snippets.getInferenceSnippets( model, - "api_token", provider, { hfModelId: model.id, diff --git a/packages/tasks-gen/snippets-fixtures/automatic-speech-recognition/js/fetch/0.hf-inference.js b/packages/tasks-gen/snippets-fixtures/automatic-speech-recognition/js/fetch/0.hf-inference.js index 974e1215b0..7dce21bf33 100644 --- a/packages/tasks-gen/snippets-fixtures/automatic-speech-recognition/js/fetch/0.hf-inference.js +++ b/packages/tasks-gen/snippets-fixtures/automatic-speech-recognition/js/fetch/0.hf-inference.js @@ -3,7 +3,7 @@ async function query(data) { "https://router.huggingface.co/hf-inference/models/openai/whisper-large-v3-turbo", { headers: { - Authorization: "Bearer 
api_token", + Authorization: `Bearer ${process.env.HF_TOKEN}`, "Content-Type": "audio/flac", }, method: "POST", diff --git a/packages/tasks-gen/snippets-fixtures/automatic-speech-recognition/js/huggingface.js/0.hf-inference.js b/packages/tasks-gen/snippets-fixtures/automatic-speech-recognition/js/huggingface.js/0.hf-inference.js index 225de6b761..2c83526227 100644 --- a/packages/tasks-gen/snippets-fixtures/automatic-speech-recognition/js/huggingface.js/0.hf-inference.js +++ b/packages/tasks-gen/snippets-fixtures/automatic-speech-recognition/js/huggingface.js/0.hf-inference.js @@ -1,6 +1,6 @@ import { InferenceClient } from "@huggingface/inference"; -const client = new InferenceClient("api_token"); +const client = new InferenceClient(process.env.HF_TOKEN); const data = fs.readFileSync("sample1.flac"); diff --git a/packages/tasks-gen/snippets-fixtures/automatic-speech-recognition/python/huggingface_hub/0.hf-inference.py b/packages/tasks-gen/snippets-fixtures/automatic-speech-recognition/python/huggingface_hub/0.hf-inference.py index fa9955a05b..e37e00736b 100644 --- a/packages/tasks-gen/snippets-fixtures/automatic-speech-recognition/python/huggingface_hub/0.hf-inference.py +++ b/packages/tasks-gen/snippets-fixtures/automatic-speech-recognition/python/huggingface_hub/0.hf-inference.py @@ -1,8 +1,9 @@ +import os from huggingface_hub import InferenceClient client = InferenceClient( provider="hf-inference", - api_key="api_token", + api_key=os.environ["HF_TOKEN"], ) output = client.automatic_speech_recognition("sample1.flac", model="openai/whisper-large-v3-turbo") \ No newline at end of file diff --git a/packages/tasks-gen/snippets-fixtures/automatic-speech-recognition/python/requests/0.hf-inference.py b/packages/tasks-gen/snippets-fixtures/automatic-speech-recognition/python/requests/0.hf-inference.py index d19a3aff2d..eb59879fbf 100644 --- a/packages/tasks-gen/snippets-fixtures/automatic-speech-recognition/python/requests/0.hf-inference.py +++ 
b/packages/tasks-gen/snippets-fixtures/automatic-speech-recognition/python/requests/0.hf-inference.py @@ -1,8 +1,9 @@ +import os import requests API_URL = "https://router.huggingface.co/hf-inference/models/openai/whisper-large-v3-turbo" headers = { - "Authorization": "Bearer api_token", + "Authorization": f"Bearer {os.environ['HF_TOKEN']}", } def query(filename): diff --git a/packages/tasks-gen/snippets-fixtures/automatic-speech-recognition/sh/curl/0.hf-inference.sh b/packages/tasks-gen/snippets-fixtures/automatic-speech-recognition/sh/curl/0.hf-inference.sh index 2e97440f52..e8d0320c0d 100644 --- a/packages/tasks-gen/snippets-fixtures/automatic-speech-recognition/sh/curl/0.hf-inference.sh +++ b/packages/tasks-gen/snippets-fixtures/automatic-speech-recognition/sh/curl/0.hf-inference.sh @@ -1,5 +1,5 @@ curl https://router.huggingface.co/hf-inference/models/openai/whisper-large-v3-turbo \ -X POST \ - -H 'Authorization: Bearer api_token' \ + -H "Authorization: Bearer $HF_TOKEN" \ -H 'Content-Type: audio/flac' \ --data-binary @"sample1.flac" \ No newline at end of file diff --git a/packages/tasks-gen/snippets-fixtures/basic-snippet--token-classification/js/fetch/0.hf-inference.js b/packages/tasks-gen/snippets-fixtures/basic-snippet--token-classification/js/fetch/0.hf-inference.js index b3bbd33b48..9d221e3d59 100644 --- a/packages/tasks-gen/snippets-fixtures/basic-snippet--token-classification/js/fetch/0.hf-inference.js +++ b/packages/tasks-gen/snippets-fixtures/basic-snippet--token-classification/js/fetch/0.hf-inference.js @@ -3,7 +3,7 @@ async function query(data) { "https://router.huggingface.co/hf-inference/models/FacebookAI/xlm-roberta-large-finetuned-conll03-english", { headers: { - Authorization: "Bearer api_token", + Authorization: `Bearer ${process.env.HF_TOKEN}`, "Content-Type": "application/json", }, method: "POST", diff --git a/packages/tasks-gen/snippets-fixtures/basic-snippet--token-classification/js/huggingface.js/0.hf-inference.js 
b/packages/tasks-gen/snippets-fixtures/basic-snippet--token-classification/js/huggingface.js/0.hf-inference.js index e7c26a97a5..78665602f3 100644 --- a/packages/tasks-gen/snippets-fixtures/basic-snippet--token-classification/js/huggingface.js/0.hf-inference.js +++ b/packages/tasks-gen/snippets-fixtures/basic-snippet--token-classification/js/huggingface.js/0.hf-inference.js @@ -1,6 +1,6 @@ import { InferenceClient } from "@huggingface/inference"; -const client = new InferenceClient("api_token"); +const client = new InferenceClient(process.env.HF_TOKEN); const output = await client.tokenClassification({ model: "FacebookAI/xlm-roberta-large-finetuned-conll03-english", diff --git a/packages/tasks-gen/snippets-fixtures/basic-snippet--token-classification/python/huggingface_hub/0.hf-inference.py b/packages/tasks-gen/snippets-fixtures/basic-snippet--token-classification/python/huggingface_hub/0.hf-inference.py index 140ca7e9fd..01c475ec74 100644 --- a/packages/tasks-gen/snippets-fixtures/basic-snippet--token-classification/python/huggingface_hub/0.hf-inference.py +++ b/packages/tasks-gen/snippets-fixtures/basic-snippet--token-classification/python/huggingface_hub/0.hf-inference.py @@ -1,8 +1,9 @@ +import os from huggingface_hub import InferenceClient client = InferenceClient( provider="hf-inference", - api_key="api_token", + api_key=os.environ["HF_TOKEN"], ) result = client.token_classification( diff --git a/packages/tasks-gen/snippets-fixtures/basic-snippet--token-classification/python/requests/0.hf-inference.py b/packages/tasks-gen/snippets-fixtures/basic-snippet--token-classification/python/requests/0.hf-inference.py index f6a479f213..a09054a28c 100644 --- a/packages/tasks-gen/snippets-fixtures/basic-snippet--token-classification/python/requests/0.hf-inference.py +++ b/packages/tasks-gen/snippets-fixtures/basic-snippet--token-classification/python/requests/0.hf-inference.py @@ -1,8 +1,9 @@ +import os import requests API_URL = 
"https://router.huggingface.co/hf-inference/models/FacebookAI/xlm-roberta-large-finetuned-conll03-english" headers = { - "Authorization": "Bearer api_token", + "Authorization": f"Bearer {os.environ['HF_TOKEN']}", } def query(payload): diff --git a/packages/tasks-gen/snippets-fixtures/basic-snippet--token-classification/sh/curl/0.hf-inference.sh b/packages/tasks-gen/snippets-fixtures/basic-snippet--token-classification/sh/curl/0.hf-inference.sh index a3dc2df746..2f3ae7bc03 100644 --- a/packages/tasks-gen/snippets-fixtures/basic-snippet--token-classification/sh/curl/0.hf-inference.sh +++ b/packages/tasks-gen/snippets-fixtures/basic-snippet--token-classification/sh/curl/0.hf-inference.sh @@ -1,6 +1,6 @@ curl https://router.huggingface.co/hf-inference/models/FacebookAI/xlm-roberta-large-finetuned-conll03-english \ -X POST \ - -H 'Authorization: Bearer api_token' \ + -H "Authorization: Bearer $HF_TOKEN" \ -H 'Content-Type: application/json' \ -d '{ "inputs": "\"My name is Sarah Jessica Parker but you can call me Jessica\"" diff --git a/packages/tasks-gen/snippets-fixtures/bill-to-param/js/huggingface.js/0.hf-inference.js b/packages/tasks-gen/snippets-fixtures/bill-to-param/js/huggingface.js/0.hf-inference.js index 0b1a4e377e..6a63c8a9ed 100644 --- a/packages/tasks-gen/snippets-fixtures/bill-to-param/js/huggingface.js/0.hf-inference.js +++ b/packages/tasks-gen/snippets-fixtures/bill-to-param/js/huggingface.js/0.hf-inference.js @@ -1,6 +1,6 @@ import { InferenceClient } from "@huggingface/inference"; -const client = new InferenceClient("api_token"); +const client = new InferenceClient(process.env.HF_TOKEN); const chatCompletion = await client.chatCompletion({ provider: "hf-inference", diff --git a/packages/tasks-gen/snippets-fixtures/bill-to-param/js/openai/0.hf-inference.js b/packages/tasks-gen/snippets-fixtures/bill-to-param/js/openai/0.hf-inference.js index 48fe640957..de5ed1cbf9 100644 --- 
a/packages/tasks-gen/snippets-fixtures/bill-to-param/js/openai/0.hf-inference.js +++ b/packages/tasks-gen/snippets-fixtures/bill-to-param/js/openai/0.hf-inference.js @@ -2,7 +2,7 @@ import { OpenAI } from "openai"; const client = new OpenAI({ baseURL: "https://router.huggingface.co/hf-inference/models/meta-llama/Llama-3.1-8B-Instruct/v1", - apiKey: "api_token", + apiKey: process.env.HF_TOKEN, defaultHeaders: { "X-HF-Bill-To": "huggingface" } diff --git a/packages/tasks-gen/snippets-fixtures/bill-to-param/python/huggingface_hub/0.hf-inference.py b/packages/tasks-gen/snippets-fixtures/bill-to-param/python/huggingface_hub/0.hf-inference.py index 1bbfa87505..421cfccb69 100644 --- a/packages/tasks-gen/snippets-fixtures/bill-to-param/python/huggingface_hub/0.hf-inference.py +++ b/packages/tasks-gen/snippets-fixtures/bill-to-param/python/huggingface_hub/0.hf-inference.py @@ -1,8 +1,9 @@ +import os from huggingface_hub import InferenceClient client = InferenceClient( provider="hf-inference", - api_key="api_token", + api_key=os.environ["HF_TOKEN"], bill_to="huggingface", ) diff --git a/packages/tasks-gen/snippets-fixtures/bill-to-param/python/openai/0.hf-inference.py b/packages/tasks-gen/snippets-fixtures/bill-to-param/python/openai/0.hf-inference.py index d83a69de8c..72133bc70f 100644 --- a/packages/tasks-gen/snippets-fixtures/bill-to-param/python/openai/0.hf-inference.py +++ b/packages/tasks-gen/snippets-fixtures/bill-to-param/python/openai/0.hf-inference.py @@ -1,8 +1,9 @@ +import os from openai import OpenAI client = OpenAI( base_url="https://router.huggingface.co/hf-inference/models/meta-llama/Llama-3.1-8B-Instruct/v1", - api_key="api_token", + api_key=os.environ["HF_TOKEN"], default_headers={ "X-HF-Bill-To": "huggingface" } diff --git a/packages/tasks-gen/snippets-fixtures/bill-to-param/python/requests/0.hf-inference.py b/packages/tasks-gen/snippets-fixtures/bill-to-param/python/requests/0.hf-inference.py index 67773d5f38..e2fecb0de2 100644 --- 
a/packages/tasks-gen/snippets-fixtures/bill-to-param/python/requests/0.hf-inference.py +++ b/packages/tasks-gen/snippets-fixtures/bill-to-param/python/requests/0.hf-inference.py @@ -1,8 +1,9 @@ +import os import requests API_URL = "https://router.huggingface.co/hf-inference/models/meta-llama/Llama-3.1-8B-Instruct/v1/chat/completions" headers = { - "Authorization": "Bearer api_token", + "Authorization": f"Bearer {os.environ['HF_TOKEN']}", "X-HF-Bill-To": "huggingface" } diff --git a/packages/tasks-gen/snippets-fixtures/bill-to-param/sh/curl/0.hf-inference.sh b/packages/tasks-gen/snippets-fixtures/bill-to-param/sh/curl/0.hf-inference.sh index b059bcc5a2..e6d5331dba 100644 --- a/packages/tasks-gen/snippets-fixtures/bill-to-param/sh/curl/0.hf-inference.sh +++ b/packages/tasks-gen/snippets-fixtures/bill-to-param/sh/curl/0.hf-inference.sh @@ -1,5 +1,5 @@ curl https://router.huggingface.co/hf-inference/models/meta-llama/Llama-3.1-8B-Instruct/v1/chat/completions \ - -H 'Authorization: Bearer api_token' \ + -H "Authorization: Bearer $HF_TOKEN" \ -H 'Content-Type: application/json' \ -H 'X-HF-Bill-To: huggingface' \ -d '{ diff --git a/packages/tasks-gen/snippets-fixtures/conversational-llm-non-stream/js/huggingface.js/0.hf-inference.js b/packages/tasks-gen/snippets-fixtures/conversational-llm-non-stream/js/huggingface.js/0.hf-inference.js index 5b86e73868..3dae10e38e 100644 --- a/packages/tasks-gen/snippets-fixtures/conversational-llm-non-stream/js/huggingface.js/0.hf-inference.js +++ b/packages/tasks-gen/snippets-fixtures/conversational-llm-non-stream/js/huggingface.js/0.hf-inference.js @@ -1,6 +1,6 @@ import { InferenceClient } from "@huggingface/inference"; -const client = new InferenceClient("api_token"); +const client = new InferenceClient(process.env.HF_TOKEN); const chatCompletion = await client.chatCompletion({ provider: "hf-inference", diff --git a/packages/tasks-gen/snippets-fixtures/conversational-llm-non-stream/js/huggingface.js/0.together.js 
b/packages/tasks-gen/snippets-fixtures/conversational-llm-non-stream/js/huggingface.js/0.together.js index c5e3f526e5..d179ca7ec7 100644 --- a/packages/tasks-gen/snippets-fixtures/conversational-llm-non-stream/js/huggingface.js/0.together.js +++ b/packages/tasks-gen/snippets-fixtures/conversational-llm-non-stream/js/huggingface.js/0.together.js @@ -1,6 +1,6 @@ import { InferenceClient } from "@huggingface/inference"; -const client = new InferenceClient("api_token"); +const client = new InferenceClient(process.env.HF_TOKEN); const chatCompletion = await client.chatCompletion({ provider: "together", diff --git a/packages/tasks-gen/snippets-fixtures/conversational-llm-non-stream/js/openai/0.hf-inference.js b/packages/tasks-gen/snippets-fixtures/conversational-llm-non-stream/js/openai/0.hf-inference.js index fd3565fee8..8ce44ee03f 100644 --- a/packages/tasks-gen/snippets-fixtures/conversational-llm-non-stream/js/openai/0.hf-inference.js +++ b/packages/tasks-gen/snippets-fixtures/conversational-llm-non-stream/js/openai/0.hf-inference.js @@ -2,7 +2,7 @@ import { OpenAI } from "openai"; const client = new OpenAI({ baseURL: "https://router.huggingface.co/hf-inference/models/meta-llama/Llama-3.1-8B-Instruct/v1", - apiKey: "api_token", + apiKey: process.env.HF_TOKEN, }); const chatCompletion = await client.chat.completions.create({ diff --git a/packages/tasks-gen/snippets-fixtures/conversational-llm-non-stream/js/openai/0.together.js b/packages/tasks-gen/snippets-fixtures/conversational-llm-non-stream/js/openai/0.together.js index 608a366b04..0b88f47cf1 100644 --- a/packages/tasks-gen/snippets-fixtures/conversational-llm-non-stream/js/openai/0.together.js +++ b/packages/tasks-gen/snippets-fixtures/conversational-llm-non-stream/js/openai/0.together.js @@ -2,7 +2,7 @@ import { OpenAI } from "openai"; const client = new OpenAI({ baseURL: "https://api.together.xyz/v1", - apiKey: "api_token", + apiKey: process.env.TOGETHER_API_KEY, }); const chatCompletion = await 
client.chat.completions.create({ diff --git a/packages/tasks-gen/snippets-fixtures/conversational-llm-non-stream/python/huggingface_hub/0.hf-inference.py b/packages/tasks-gen/snippets-fixtures/conversational-llm-non-stream/python/huggingface_hub/0.hf-inference.py index a36e591ce3..f96bbd79a4 100644 --- a/packages/tasks-gen/snippets-fixtures/conversational-llm-non-stream/python/huggingface_hub/0.hf-inference.py +++ b/packages/tasks-gen/snippets-fixtures/conversational-llm-non-stream/python/huggingface_hub/0.hf-inference.py @@ -1,8 +1,9 @@ +import os from huggingface_hub import InferenceClient client = InferenceClient( provider="hf-inference", - api_key="api_token", + api_key=os.environ["HF_TOKEN"], ) completion = client.chat.completions.create( diff --git a/packages/tasks-gen/snippets-fixtures/conversational-llm-non-stream/python/huggingface_hub/0.together.py b/packages/tasks-gen/snippets-fixtures/conversational-llm-non-stream/python/huggingface_hub/0.together.py index 3787afa4ba..bb8ee2a614 100644 --- a/packages/tasks-gen/snippets-fixtures/conversational-llm-non-stream/python/huggingface_hub/0.together.py +++ b/packages/tasks-gen/snippets-fixtures/conversational-llm-non-stream/python/huggingface_hub/0.together.py @@ -1,8 +1,9 @@ +import os from huggingface_hub import InferenceClient client = InferenceClient( provider="together", - api_key="api_token", + api_key=os.environ["HF_TOKEN"], ) completion = client.chat.completions.create( diff --git a/packages/tasks-gen/snippets-fixtures/conversational-llm-non-stream/python/openai/0.hf-inference.py b/packages/tasks-gen/snippets-fixtures/conversational-llm-non-stream/python/openai/0.hf-inference.py index 054d7718b8..4f9cb8152e 100644 --- a/packages/tasks-gen/snippets-fixtures/conversational-llm-non-stream/python/openai/0.hf-inference.py +++ b/packages/tasks-gen/snippets-fixtures/conversational-llm-non-stream/python/openai/0.hf-inference.py @@ -1,8 +1,9 @@ +import os from openai import OpenAI client = OpenAI( 
base_url="https://router.huggingface.co/hf-inference/models/meta-llama/Llama-3.1-8B-Instruct/v1", - api_key="api_token", + api_key=os.environ["HF_TOKEN"], ) completion = client.chat.completions.create( diff --git a/packages/tasks-gen/snippets-fixtures/conversational-llm-non-stream/python/openai/0.together.py b/packages/tasks-gen/snippets-fixtures/conversational-llm-non-stream/python/openai/0.together.py index f834d18aa3..6b3b71305f 100644 --- a/packages/tasks-gen/snippets-fixtures/conversational-llm-non-stream/python/openai/0.together.py +++ b/packages/tasks-gen/snippets-fixtures/conversational-llm-non-stream/python/openai/0.together.py @@ -1,8 +1,9 @@ +import os from openai import OpenAI client = OpenAI( base_url="https://api.together.xyz/v1", - api_key="api_token", + api_key=os.environ["TOGETHER_API_KEY"], ) completion = client.chat.completions.create( diff --git a/packages/tasks-gen/snippets-fixtures/conversational-llm-non-stream/python/requests/0.hf-inference.py b/packages/tasks-gen/snippets-fixtures/conversational-llm-non-stream/python/requests/0.hf-inference.py index 77f9440a5e..34e1f947ed 100644 --- a/packages/tasks-gen/snippets-fixtures/conversational-llm-non-stream/python/requests/0.hf-inference.py +++ b/packages/tasks-gen/snippets-fixtures/conversational-llm-non-stream/python/requests/0.hf-inference.py @@ -1,8 +1,9 @@ +import os import requests API_URL = "https://router.huggingface.co/hf-inference/models/meta-llama/Llama-3.1-8B-Instruct/v1/chat/completions" headers = { - "Authorization": "Bearer api_token", + "Authorization": f"Bearer {os.environ['HF_TOKEN']}", } def query(payload): diff --git a/packages/tasks-gen/snippets-fixtures/conversational-llm-non-stream/python/requests/0.together.py b/packages/tasks-gen/snippets-fixtures/conversational-llm-non-stream/python/requests/0.together.py index eb1d80e1cb..8583441882 100644 --- a/packages/tasks-gen/snippets-fixtures/conversational-llm-non-stream/python/requests/0.together.py +++ 
b/packages/tasks-gen/snippets-fixtures/conversational-llm-non-stream/python/requests/0.together.py @@ -1,8 +1,9 @@ +import os import requests API_URL = "https://api.together.xyz/v1/chat/completions" headers = { - "Authorization": "Bearer api_token", + "Authorization": f"Bearer {os.environ['TOGETHER_API_KEY']}", } def query(payload): diff --git a/packages/tasks-gen/snippets-fixtures/conversational-llm-non-stream/sh/curl/0.hf-inference.sh b/packages/tasks-gen/snippets-fixtures/conversational-llm-non-stream/sh/curl/0.hf-inference.sh index 83ffe438ac..e928a97f03 100644 --- a/packages/tasks-gen/snippets-fixtures/conversational-llm-non-stream/sh/curl/0.hf-inference.sh +++ b/packages/tasks-gen/snippets-fixtures/conversational-llm-non-stream/sh/curl/0.hf-inference.sh @@ -1,5 +1,5 @@ curl https://router.huggingface.co/hf-inference/models/meta-llama/Llama-3.1-8B-Instruct/v1/chat/completions \ - -H 'Authorization: Bearer api_token' \ + -H "Authorization: Bearer $HF_TOKEN" \ -H 'Content-Type: application/json' \ -d '{ "messages": [ diff --git a/packages/tasks-gen/snippets-fixtures/conversational-llm-non-stream/sh/curl/0.together.sh b/packages/tasks-gen/snippets-fixtures/conversational-llm-non-stream/sh/curl/0.together.sh index a2a125618c..268f721daa 100644 --- a/packages/tasks-gen/snippets-fixtures/conversational-llm-non-stream/sh/curl/0.together.sh +++ b/packages/tasks-gen/snippets-fixtures/conversational-llm-non-stream/sh/curl/0.together.sh @@ -1,5 +1,5 @@ curl https://api.together.xyz/v1/chat/completions \ - -H 'Authorization: Bearer api_token' \ + -H "Authorization: Bearer $TOGETHER_API_KEY" \ -H 'Content-Type: application/json' \ -d '{ "messages": [ diff --git a/packages/tasks-gen/snippets-fixtures/conversational-llm-stream/js/huggingface.js/0.hf-inference.js b/packages/tasks-gen/snippets-fixtures/conversational-llm-stream/js/huggingface.js/0.hf-inference.js index 26153e369e..767faf03ca 100644 --- 
a/packages/tasks-gen/snippets-fixtures/conversational-llm-stream/js/huggingface.js/0.hf-inference.js +++ b/packages/tasks-gen/snippets-fixtures/conversational-llm-stream/js/huggingface.js/0.hf-inference.js @@ -1,6 +1,6 @@ import { InferenceClient } from "@huggingface/inference"; -const client = new InferenceClient("api_token"); +const client = new InferenceClient(process.env.HF_TOKEN); let out = ""; diff --git a/packages/tasks-gen/snippets-fixtures/conversational-llm-stream/js/huggingface.js/0.together.js b/packages/tasks-gen/snippets-fixtures/conversational-llm-stream/js/huggingface.js/0.together.js index 3593f4b158..b5991971c1 100644 --- a/packages/tasks-gen/snippets-fixtures/conversational-llm-stream/js/huggingface.js/0.together.js +++ b/packages/tasks-gen/snippets-fixtures/conversational-llm-stream/js/huggingface.js/0.together.js @@ -1,6 +1,6 @@ import { InferenceClient } from "@huggingface/inference"; -const client = new InferenceClient("api_token"); +const client = new InferenceClient(process.env.HF_TOKEN); let out = ""; diff --git a/packages/tasks-gen/snippets-fixtures/conversational-llm-stream/js/openai/0.hf-inference.js b/packages/tasks-gen/snippets-fixtures/conversational-llm-stream/js/openai/0.hf-inference.js index daac31dece..ec21ca01e1 100644 --- a/packages/tasks-gen/snippets-fixtures/conversational-llm-stream/js/openai/0.hf-inference.js +++ b/packages/tasks-gen/snippets-fixtures/conversational-llm-stream/js/openai/0.hf-inference.js @@ -2,7 +2,7 @@ import { OpenAI } from "openai"; const client = new OpenAI({ baseURL: "https://router.huggingface.co/hf-inference/models/meta-llama/Llama-3.1-8B-Instruct/v1", - apiKey: "api_token", + apiKey: process.env.HF_TOKEN, }); const stream = await client.chat.completions.create({ diff --git a/packages/tasks-gen/snippets-fixtures/conversational-llm-stream/js/openai/0.together.js b/packages/tasks-gen/snippets-fixtures/conversational-llm-stream/js/openai/0.together.js index 9a667ff5dd..453992a01c 100644 --- 
a/packages/tasks-gen/snippets-fixtures/conversational-llm-stream/js/openai/0.together.js +++ b/packages/tasks-gen/snippets-fixtures/conversational-llm-stream/js/openai/0.together.js @@ -2,7 +2,7 @@ import { OpenAI } from "openai"; const client = new OpenAI({ baseURL: "https://api.together.xyz/v1", - apiKey: "api_token", + apiKey: process.env.TOGETHER_API_KEY, }); const stream = await client.chat.completions.create({ diff --git a/packages/tasks-gen/snippets-fixtures/conversational-llm-stream/python/huggingface_hub/0.hf-inference.py b/packages/tasks-gen/snippets-fixtures/conversational-llm-stream/python/huggingface_hub/0.hf-inference.py index 6afd18b072..11c3ae48d7 100644 --- a/packages/tasks-gen/snippets-fixtures/conversational-llm-stream/python/huggingface_hub/0.hf-inference.py +++ b/packages/tasks-gen/snippets-fixtures/conversational-llm-stream/python/huggingface_hub/0.hf-inference.py @@ -1,8 +1,9 @@ +import os from huggingface_hub import InferenceClient client = InferenceClient( provider="hf-inference", - api_key="api_token", + api_key=os.environ["HF_TOKEN"], ) stream = client.chat.completions.create( diff --git a/packages/tasks-gen/snippets-fixtures/conversational-llm-stream/python/huggingface_hub/0.together.py b/packages/tasks-gen/snippets-fixtures/conversational-llm-stream/python/huggingface_hub/0.together.py index c57fd81892..ab38d624ed 100644 --- a/packages/tasks-gen/snippets-fixtures/conversational-llm-stream/python/huggingface_hub/0.together.py +++ b/packages/tasks-gen/snippets-fixtures/conversational-llm-stream/python/huggingface_hub/0.together.py @@ -1,8 +1,9 @@ +import os from huggingface_hub import InferenceClient client = InferenceClient( provider="together", - api_key="api_token", + api_key=os.environ["HF_TOKEN"], ) stream = client.chat.completions.create( diff --git a/packages/tasks-gen/snippets-fixtures/conversational-llm-stream/python/openai/0.hf-inference.py 
b/packages/tasks-gen/snippets-fixtures/conversational-llm-stream/python/openai/0.hf-inference.py index 92f2efddd4..ff6913fddf 100644 --- a/packages/tasks-gen/snippets-fixtures/conversational-llm-stream/python/openai/0.hf-inference.py +++ b/packages/tasks-gen/snippets-fixtures/conversational-llm-stream/python/openai/0.hf-inference.py @@ -1,8 +1,9 @@ +import os from openai import OpenAI client = OpenAI( base_url="https://router.huggingface.co/hf-inference/models/meta-llama/Llama-3.1-8B-Instruct/v1", - api_key="api_token", + api_key=os.environ["HF_TOKEN"], ) stream = client.chat.completions.create( diff --git a/packages/tasks-gen/snippets-fixtures/conversational-llm-stream/python/openai/0.together.py b/packages/tasks-gen/snippets-fixtures/conversational-llm-stream/python/openai/0.together.py index e75d7df725..d199f8d88e 100644 --- a/packages/tasks-gen/snippets-fixtures/conversational-llm-stream/python/openai/0.together.py +++ b/packages/tasks-gen/snippets-fixtures/conversational-llm-stream/python/openai/0.together.py @@ -1,8 +1,9 @@ +import os from openai import OpenAI client = OpenAI( base_url="https://api.together.xyz/v1", - api_key="api_token", + api_key=os.environ["TOGETHER_API_KEY"], ) stream = client.chat.completions.create( diff --git a/packages/tasks-gen/snippets-fixtures/conversational-llm-stream/python/requests/0.hf-inference.py b/packages/tasks-gen/snippets-fixtures/conversational-llm-stream/python/requests/0.hf-inference.py index b10332b22f..7bcd49ede4 100644 --- a/packages/tasks-gen/snippets-fixtures/conversational-llm-stream/python/requests/0.hf-inference.py +++ b/packages/tasks-gen/snippets-fixtures/conversational-llm-stream/python/requests/0.hf-inference.py @@ -1,9 +1,10 @@ +import os import json import requests API_URL = "https://router.huggingface.co/hf-inference/models/meta-llama/Llama-3.1-8B-Instruct/v1/chat/completions" headers = { - "Authorization": "Bearer api_token", + "Authorization": f"Bearer {os.environ['HF_TOKEN']}", } def query(payload): 
diff --git a/packages/tasks-gen/snippets-fixtures/conversational-llm-stream/python/requests/0.together.py b/packages/tasks-gen/snippets-fixtures/conversational-llm-stream/python/requests/0.together.py index e97129f23e..f3cf193cd3 100644 --- a/packages/tasks-gen/snippets-fixtures/conversational-llm-stream/python/requests/0.together.py +++ b/packages/tasks-gen/snippets-fixtures/conversational-llm-stream/python/requests/0.together.py @@ -1,9 +1,10 @@ +import os import json import requests API_URL = "https://api.together.xyz/v1/chat/completions" headers = { - "Authorization": "Bearer api_token", + "Authorization": f"Bearer {os.environ['TOGETHER_API_KEY']}", } def query(payload): diff --git a/packages/tasks-gen/snippets-fixtures/conversational-llm-stream/sh/curl/0.hf-inference.sh b/packages/tasks-gen/snippets-fixtures/conversational-llm-stream/sh/curl/0.hf-inference.sh index a04c1f97c5..99a34f2175 100644 --- a/packages/tasks-gen/snippets-fixtures/conversational-llm-stream/sh/curl/0.hf-inference.sh +++ b/packages/tasks-gen/snippets-fixtures/conversational-llm-stream/sh/curl/0.hf-inference.sh @@ -1,5 +1,5 @@ curl https://router.huggingface.co/hf-inference/models/meta-llama/Llama-3.1-8B-Instruct/v1/chat/completions \ - -H 'Authorization: Bearer api_token' \ + -H "Authorization: Bearer $HF_TOKEN" \ -H 'Content-Type: application/json' \ -d '{ "messages": [ diff --git a/packages/tasks-gen/snippets-fixtures/conversational-llm-stream/sh/curl/0.together.sh b/packages/tasks-gen/snippets-fixtures/conversational-llm-stream/sh/curl/0.together.sh index 8c28b321d5..708320a076 100644 --- a/packages/tasks-gen/snippets-fixtures/conversational-llm-stream/sh/curl/0.together.sh +++ b/packages/tasks-gen/snippets-fixtures/conversational-llm-stream/sh/curl/0.together.sh @@ -1,5 +1,5 @@ curl https://api.together.xyz/v1/chat/completions \ - -H 'Authorization: Bearer api_token' \ + -H "Authorization: Bearer $TOGETHER_API_KEY" \ -H 'Content-Type: application/json' \ -d '{ "messages": [ diff --git 
a/packages/tasks-gen/snippets-fixtures/conversational-vlm-non-stream/js/huggingface.js/0.fireworks-ai.js b/packages/tasks-gen/snippets-fixtures/conversational-vlm-non-stream/js/huggingface.js/0.fireworks-ai.js index dd13449a1c..08252975cc 100644 --- a/packages/tasks-gen/snippets-fixtures/conversational-vlm-non-stream/js/huggingface.js/0.fireworks-ai.js +++ b/packages/tasks-gen/snippets-fixtures/conversational-vlm-non-stream/js/huggingface.js/0.fireworks-ai.js @@ -1,6 +1,6 @@ import { InferenceClient } from "@huggingface/inference"; -const client = new InferenceClient("api_token"); +const client = new InferenceClient(process.env.FIREWORKS_AI_API_KEY); const chatCompletion = await client.chatCompletion({ provider: "fireworks-ai", diff --git a/packages/tasks-gen/snippets-fixtures/conversational-vlm-non-stream/js/huggingface.js/0.hf-inference.js b/packages/tasks-gen/snippets-fixtures/conversational-vlm-non-stream/js/huggingface.js/0.hf-inference.js index 54c64fe529..37f1115edf 100644 --- a/packages/tasks-gen/snippets-fixtures/conversational-vlm-non-stream/js/huggingface.js/0.hf-inference.js +++ b/packages/tasks-gen/snippets-fixtures/conversational-vlm-non-stream/js/huggingface.js/0.hf-inference.js @@ -1,6 +1,6 @@ import { InferenceClient } from "@huggingface/inference"; -const client = new InferenceClient("api_token"); +const client = new InferenceClient(process.env.HF_TOKEN); const chatCompletion = await client.chatCompletion({ provider: "hf-inference", diff --git a/packages/tasks-gen/snippets-fixtures/conversational-vlm-non-stream/js/openai/0.fireworks-ai.js b/packages/tasks-gen/snippets-fixtures/conversational-vlm-non-stream/js/openai/0.fireworks-ai.js index e2e1380470..540065e2f6 100644 --- a/packages/tasks-gen/snippets-fixtures/conversational-vlm-non-stream/js/openai/0.fireworks-ai.js +++ b/packages/tasks-gen/snippets-fixtures/conversational-vlm-non-stream/js/openai/0.fireworks-ai.js @@ -2,7 +2,7 @@ import { OpenAI } from "openai"; const client = new OpenAI({ 
baseURL: "https://api.fireworks.ai/inference/v1", - apiKey: "api_token", + apiKey: process.env.FIREWORKS_AI_API_KEY, }); const chatCompletion = await client.chat.completions.create({ diff --git a/packages/tasks-gen/snippets-fixtures/conversational-vlm-non-stream/js/openai/0.hf-inference.js b/packages/tasks-gen/snippets-fixtures/conversational-vlm-non-stream/js/openai/0.hf-inference.js index 1d25c8b032..fe86672048 100644 --- a/packages/tasks-gen/snippets-fixtures/conversational-vlm-non-stream/js/openai/0.hf-inference.js +++ b/packages/tasks-gen/snippets-fixtures/conversational-vlm-non-stream/js/openai/0.hf-inference.js @@ -2,7 +2,7 @@ import { OpenAI } from "openai"; const client = new OpenAI({ baseURL: "https://router.huggingface.co/hf-inference/models/meta-llama/Llama-3.2-11B-Vision-Instruct/v1", - apiKey: "api_token", + apiKey: process.env.HF_TOKEN, }); const chatCompletion = await client.chat.completions.create({ diff --git a/packages/tasks-gen/snippets-fixtures/conversational-vlm-non-stream/python/huggingface_hub/0.fireworks-ai.py b/packages/tasks-gen/snippets-fixtures/conversational-vlm-non-stream/python/huggingface_hub/0.fireworks-ai.py index 8d23fe29f6..7c228773ae 100644 --- a/packages/tasks-gen/snippets-fixtures/conversational-vlm-non-stream/python/huggingface_hub/0.fireworks-ai.py +++ b/packages/tasks-gen/snippets-fixtures/conversational-vlm-non-stream/python/huggingface_hub/0.fireworks-ai.py @@ -1,8 +1,9 @@ +import os from huggingface_hub import InferenceClient client = InferenceClient( provider="fireworks-ai", - api_key="api_token", + api_key=os.environ["FIREWORKS_AI_API_KEY"], ) completion = client.chat.completions.create( diff --git a/packages/tasks-gen/snippets-fixtures/conversational-vlm-non-stream/python/huggingface_hub/0.hf-inference.py b/packages/tasks-gen/snippets-fixtures/conversational-vlm-non-stream/python/huggingface_hub/0.hf-inference.py index 2fb4a4c9ad..a27035372e 100644 --- 
a/packages/tasks-gen/snippets-fixtures/conversational-vlm-non-stream/python/huggingface_hub/0.hf-inference.py +++ b/packages/tasks-gen/snippets-fixtures/conversational-vlm-non-stream/python/huggingface_hub/0.hf-inference.py @@ -1,8 +1,9 @@ +import os from huggingface_hub import InferenceClient client = InferenceClient( provider="hf-inference", - api_key="api_token", + api_key=os.environ["HF_TOKEN"], ) completion = client.chat.completions.create( diff --git a/packages/tasks-gen/snippets-fixtures/conversational-vlm-non-stream/python/openai/0.fireworks-ai.py b/packages/tasks-gen/snippets-fixtures/conversational-vlm-non-stream/python/openai/0.fireworks-ai.py index 041fa12e27..24fac1a5e8 100644 --- a/packages/tasks-gen/snippets-fixtures/conversational-vlm-non-stream/python/openai/0.fireworks-ai.py +++ b/packages/tasks-gen/snippets-fixtures/conversational-vlm-non-stream/python/openai/0.fireworks-ai.py @@ -1,8 +1,9 @@ +import os from openai import OpenAI client = OpenAI( base_url="https://api.fireworks.ai/inference/v1", - api_key="api_token", + api_key=os.environ["FIREWORKS_AI_API_KEY"], ) completion = client.chat.completions.create( diff --git a/packages/tasks-gen/snippets-fixtures/conversational-vlm-non-stream/python/openai/0.hf-inference.py b/packages/tasks-gen/snippets-fixtures/conversational-vlm-non-stream/python/openai/0.hf-inference.py index 1c8a26d9d3..b4ebc59e65 100644 --- a/packages/tasks-gen/snippets-fixtures/conversational-vlm-non-stream/python/openai/0.hf-inference.py +++ b/packages/tasks-gen/snippets-fixtures/conversational-vlm-non-stream/python/openai/0.hf-inference.py @@ -1,8 +1,9 @@ +import os from openai import OpenAI client = OpenAI( base_url="https://router.huggingface.co/hf-inference/models/meta-llama/Llama-3.2-11B-Vision-Instruct/v1", - api_key="api_token", + api_key=os.environ["HF_TOKEN"], ) completion = client.chat.completions.create( diff --git a/packages/tasks-gen/snippets-fixtures/conversational-vlm-non-stream/python/requests/0.fireworks-ai.py 
b/packages/tasks-gen/snippets-fixtures/conversational-vlm-non-stream/python/requests/0.fireworks-ai.py index be830e0f16..f54678402b 100644 --- a/packages/tasks-gen/snippets-fixtures/conversational-vlm-non-stream/python/requests/0.fireworks-ai.py +++ b/packages/tasks-gen/snippets-fixtures/conversational-vlm-non-stream/python/requests/0.fireworks-ai.py @@ -1,8 +1,9 @@ +import os import requests API_URL = "https://api.fireworks.ai/inference/v1/chat/completions" headers = { - "Authorization": "Bearer api_token", + "Authorization": f"Bearer {os.environ['FIREWORKS_AI_API_KEY']}", } def query(payload): diff --git a/packages/tasks-gen/snippets-fixtures/conversational-vlm-non-stream/python/requests/0.hf-inference.py b/packages/tasks-gen/snippets-fixtures/conversational-vlm-non-stream/python/requests/0.hf-inference.py index fab72aacef..d9ad0ec12a 100644 --- a/packages/tasks-gen/snippets-fixtures/conversational-vlm-non-stream/python/requests/0.hf-inference.py +++ b/packages/tasks-gen/snippets-fixtures/conversational-vlm-non-stream/python/requests/0.hf-inference.py @@ -1,8 +1,9 @@ +import os import requests API_URL = "https://router.huggingface.co/hf-inference/models/meta-llama/Llama-3.2-11B-Vision-Instruct/v1/chat/completions" headers = { - "Authorization": "Bearer api_token", + "Authorization": f"Bearer {os.environ['HF_TOKEN']}", } def query(payload): diff --git a/packages/tasks-gen/snippets-fixtures/conversational-vlm-non-stream/sh/curl/0.fireworks-ai.sh b/packages/tasks-gen/snippets-fixtures/conversational-vlm-non-stream/sh/curl/0.fireworks-ai.sh index 1f195d20a9..4d9bcbb84c 100644 --- a/packages/tasks-gen/snippets-fixtures/conversational-vlm-non-stream/sh/curl/0.fireworks-ai.sh +++ b/packages/tasks-gen/snippets-fixtures/conversational-vlm-non-stream/sh/curl/0.fireworks-ai.sh @@ -1,5 +1,5 @@ curl https://api.fireworks.ai/inference/v1/chat/completions \ - -H 'Authorization: Bearer api_token' \ + -H "Authorization: Bearer $FIREWORKS_AI_API_KEY" \ -H 'Content-Type: 
application/json' \ -d '{ "messages": [ diff --git a/packages/tasks-gen/snippets-fixtures/conversational-vlm-non-stream/sh/curl/0.hf-inference.sh b/packages/tasks-gen/snippets-fixtures/conversational-vlm-non-stream/sh/curl/0.hf-inference.sh index a0c8d4e854..1cf0864848 100644 --- a/packages/tasks-gen/snippets-fixtures/conversational-vlm-non-stream/sh/curl/0.hf-inference.sh +++ b/packages/tasks-gen/snippets-fixtures/conversational-vlm-non-stream/sh/curl/0.hf-inference.sh @@ -1,5 +1,5 @@ curl https://router.huggingface.co/hf-inference/models/meta-llama/Llama-3.2-11B-Vision-Instruct/v1/chat/completions \ - -H 'Authorization: Bearer api_token' \ + -H "Authorization: Bearer $HF_TOKEN" \ -H 'Content-Type: application/json' \ -d '{ "messages": [ diff --git a/packages/tasks-gen/snippets-fixtures/conversational-vlm-stream/js/huggingface.js/0.fireworks-ai.js b/packages/tasks-gen/snippets-fixtures/conversational-vlm-stream/js/huggingface.js/0.fireworks-ai.js index 4f6bfed66e..0565060cac 100644 --- a/packages/tasks-gen/snippets-fixtures/conversational-vlm-stream/js/huggingface.js/0.fireworks-ai.js +++ b/packages/tasks-gen/snippets-fixtures/conversational-vlm-stream/js/huggingface.js/0.fireworks-ai.js @@ -1,6 +1,6 @@ import { InferenceClient } from "@huggingface/inference"; -const client = new InferenceClient("api_token"); +const client = new InferenceClient(process.env.FIREWORKS_AI_API_KEY); let out = ""; diff --git a/packages/tasks-gen/snippets-fixtures/conversational-vlm-stream/js/huggingface.js/0.hf-inference.js b/packages/tasks-gen/snippets-fixtures/conversational-vlm-stream/js/huggingface.js/0.hf-inference.js index eddfa72b9b..c778810a64 100644 --- a/packages/tasks-gen/snippets-fixtures/conversational-vlm-stream/js/huggingface.js/0.hf-inference.js +++ b/packages/tasks-gen/snippets-fixtures/conversational-vlm-stream/js/huggingface.js/0.hf-inference.js @@ -1,6 +1,6 @@ import { InferenceClient } from "@huggingface/inference"; -const client = new InferenceClient("api_token"); 
+const client = new InferenceClient(process.env.HF_TOKEN); let out = ""; diff --git a/packages/tasks-gen/snippets-fixtures/conversational-vlm-stream/js/openai/0.fireworks-ai.js b/packages/tasks-gen/snippets-fixtures/conversational-vlm-stream/js/openai/0.fireworks-ai.js index 1d1c7ad7d0..3a5eabdff4 100644 --- a/packages/tasks-gen/snippets-fixtures/conversational-vlm-stream/js/openai/0.fireworks-ai.js +++ b/packages/tasks-gen/snippets-fixtures/conversational-vlm-stream/js/openai/0.fireworks-ai.js @@ -2,7 +2,7 @@ import { OpenAI } from "openai"; const client = new OpenAI({ baseURL: "https://api.fireworks.ai/inference/v1", - apiKey: "api_token", + apiKey: process.env.FIREWORKS_AI_API_KEY, }); const stream = await client.chat.completions.create({ diff --git a/packages/tasks-gen/snippets-fixtures/conversational-vlm-stream/js/openai/0.hf-inference.js b/packages/tasks-gen/snippets-fixtures/conversational-vlm-stream/js/openai/0.hf-inference.js index 64557f77e9..ea31e93f09 100644 --- a/packages/tasks-gen/snippets-fixtures/conversational-vlm-stream/js/openai/0.hf-inference.js +++ b/packages/tasks-gen/snippets-fixtures/conversational-vlm-stream/js/openai/0.hf-inference.js @@ -2,7 +2,7 @@ import { OpenAI } from "openai"; const client = new OpenAI({ baseURL: "https://router.huggingface.co/hf-inference/models/meta-llama/Llama-3.2-11B-Vision-Instruct/v1", - apiKey: "api_token", + apiKey: process.env.HF_TOKEN, }); const stream = await client.chat.completions.create({ diff --git a/packages/tasks-gen/snippets-fixtures/conversational-vlm-stream/python/huggingface_hub/0.fireworks-ai.py b/packages/tasks-gen/snippets-fixtures/conversational-vlm-stream/python/huggingface_hub/0.fireworks-ai.py index bd68cc9b25..4cae631fe2 100644 --- a/packages/tasks-gen/snippets-fixtures/conversational-vlm-stream/python/huggingface_hub/0.fireworks-ai.py +++ b/packages/tasks-gen/snippets-fixtures/conversational-vlm-stream/python/huggingface_hub/0.fireworks-ai.py @@ -1,8 +1,9 @@ +import os from 
huggingface_hub import InferenceClient client = InferenceClient( provider="fireworks-ai", - api_key="api_token", + api_key=os.environ["FIREWORKS_AI_API_KEY"], ) stream = client.chat.completions.create( diff --git a/packages/tasks-gen/snippets-fixtures/conversational-vlm-stream/python/huggingface_hub/0.hf-inference.py b/packages/tasks-gen/snippets-fixtures/conversational-vlm-stream/python/huggingface_hub/0.hf-inference.py index 349b1ad538..2738bd299e 100644 --- a/packages/tasks-gen/snippets-fixtures/conversational-vlm-stream/python/huggingface_hub/0.hf-inference.py +++ b/packages/tasks-gen/snippets-fixtures/conversational-vlm-stream/python/huggingface_hub/0.hf-inference.py @@ -1,8 +1,9 @@ +import os from huggingface_hub import InferenceClient client = InferenceClient( provider="hf-inference", - api_key="api_token", + api_key=os.environ["HF_TOKEN"], ) stream = client.chat.completions.create( diff --git a/packages/tasks-gen/snippets-fixtures/conversational-vlm-stream/python/openai/0.fireworks-ai.py b/packages/tasks-gen/snippets-fixtures/conversational-vlm-stream/python/openai/0.fireworks-ai.py index 0d7768ac57..d767b844c0 100644 --- a/packages/tasks-gen/snippets-fixtures/conversational-vlm-stream/python/openai/0.fireworks-ai.py +++ b/packages/tasks-gen/snippets-fixtures/conversational-vlm-stream/python/openai/0.fireworks-ai.py @@ -1,8 +1,9 @@ +import os from openai import OpenAI client = OpenAI( base_url="https://api.fireworks.ai/inference/v1", - api_key="api_token", + api_key=os.environ["FIREWORKS_AI_API_KEY"], ) stream = client.chat.completions.create( diff --git a/packages/tasks-gen/snippets-fixtures/conversational-vlm-stream/python/openai/0.hf-inference.py b/packages/tasks-gen/snippets-fixtures/conversational-vlm-stream/python/openai/0.hf-inference.py index 89e029f454..aa791d2288 100644 --- a/packages/tasks-gen/snippets-fixtures/conversational-vlm-stream/python/openai/0.hf-inference.py +++ 
b/packages/tasks-gen/snippets-fixtures/conversational-vlm-stream/python/openai/0.hf-inference.py @@ -1,8 +1,9 @@ +import os from openai import OpenAI client = OpenAI( base_url="https://router.huggingface.co/hf-inference/models/meta-llama/Llama-3.2-11B-Vision-Instruct/v1", - api_key="api_token", + api_key=os.environ["HF_TOKEN"], ) stream = client.chat.completions.create( diff --git a/packages/tasks-gen/snippets-fixtures/conversational-vlm-stream/python/requests/0.fireworks-ai.py b/packages/tasks-gen/snippets-fixtures/conversational-vlm-stream/python/requests/0.fireworks-ai.py index 352dcd0abc..eaf0d5a9dd 100644 --- a/packages/tasks-gen/snippets-fixtures/conversational-vlm-stream/python/requests/0.fireworks-ai.py +++ b/packages/tasks-gen/snippets-fixtures/conversational-vlm-stream/python/requests/0.fireworks-ai.py @@ -1,9 +1,10 @@ +import os import json import requests API_URL = "https://api.fireworks.ai/inference/v1/chat/completions" headers = { - "Authorization": "Bearer api_token", + "Authorization": f"Bearer {os.environ['FIREWORKS_AI_API_KEY']}", } def query(payload): diff --git a/packages/tasks-gen/snippets-fixtures/conversational-vlm-stream/python/requests/0.hf-inference.py b/packages/tasks-gen/snippets-fixtures/conversational-vlm-stream/python/requests/0.hf-inference.py index c646d38bee..fb61a7346b 100644 --- a/packages/tasks-gen/snippets-fixtures/conversational-vlm-stream/python/requests/0.hf-inference.py +++ b/packages/tasks-gen/snippets-fixtures/conversational-vlm-stream/python/requests/0.hf-inference.py @@ -1,9 +1,10 @@ +import os import json import requests API_URL = "https://router.huggingface.co/hf-inference/models/meta-llama/Llama-3.2-11B-Vision-Instruct/v1/chat/completions" headers = { - "Authorization": "Bearer api_token", + "Authorization": f"Bearer {os.environ['HF_TOKEN']}", } def query(payload): diff --git a/packages/tasks-gen/snippets-fixtures/conversational-vlm-stream/sh/curl/0.fireworks-ai.sh 
b/packages/tasks-gen/snippets-fixtures/conversational-vlm-stream/sh/curl/0.fireworks-ai.sh index 70a74e6eb9..ea8afe017d 100644 --- a/packages/tasks-gen/snippets-fixtures/conversational-vlm-stream/sh/curl/0.fireworks-ai.sh +++ b/packages/tasks-gen/snippets-fixtures/conversational-vlm-stream/sh/curl/0.fireworks-ai.sh @@ -1,5 +1,5 @@ curl https://api.fireworks.ai/inference/v1/chat/completions \ - -H 'Authorization: Bearer api_token' \ + -H "Authorization: Bearer $FIREWORKS_AI_API_KEY" \ -H 'Content-Type: application/json' \ -d '{ "messages": [ diff --git a/packages/tasks-gen/snippets-fixtures/conversational-vlm-stream/sh/curl/0.hf-inference.sh b/packages/tasks-gen/snippets-fixtures/conversational-vlm-stream/sh/curl/0.hf-inference.sh index 994794a965..ecc44c691c 100644 --- a/packages/tasks-gen/snippets-fixtures/conversational-vlm-stream/sh/curl/0.hf-inference.sh +++ b/packages/tasks-gen/snippets-fixtures/conversational-vlm-stream/sh/curl/0.hf-inference.sh @@ -1,5 +1,5 @@ curl https://router.huggingface.co/hf-inference/models/meta-llama/Llama-3.2-11B-Vision-Instruct/v1/chat/completions \ - -H 'Authorization: Bearer api_token' \ + -H "Authorization: Bearer $HF_TOKEN" \ -H 'Content-Type: application/json' \ -d '{ "messages": [ diff --git a/packages/tasks-gen/snippets-fixtures/document-question-answering/python/huggingface_hub/0.hf-inference.py b/packages/tasks-gen/snippets-fixtures/document-question-answering/python/huggingface_hub/0.hf-inference.py index 738921cc13..28198a8935 100644 --- a/packages/tasks-gen/snippets-fixtures/document-question-answering/python/huggingface_hub/0.hf-inference.py +++ b/packages/tasks-gen/snippets-fixtures/document-question-answering/python/huggingface_hub/0.hf-inference.py @@ -1,8 +1,9 @@ +import os from huggingface_hub import InferenceClient client = InferenceClient( provider="hf-inference", - api_key="api_token", + api_key=os.environ["HF_TOKEN"], ) output = client.document_question_answering( diff --git 
a/packages/tasks-gen/snippets-fixtures/document-question-answering/python/requests/0.hf-inference.py b/packages/tasks-gen/snippets-fixtures/document-question-answering/python/requests/0.hf-inference.py index 0e19e189f2..6e0c8c6ea1 100644 --- a/packages/tasks-gen/snippets-fixtures/document-question-answering/python/requests/0.hf-inference.py +++ b/packages/tasks-gen/snippets-fixtures/document-question-answering/python/requests/0.hf-inference.py @@ -1,9 +1,10 @@ +import os import base64 import requests API_URL = "https://router.huggingface.co/hf-inference/models/impira/layoutlm-invoices" headers = { - "Authorization": "Bearer api_token", + "Authorization": f"Bearer {os.environ['HF_TOKEN']}", } def query(payload): diff --git a/packages/tasks-gen/snippets-fixtures/feature-extraction/js/fetch/0.hf-inference.js b/packages/tasks-gen/snippets-fixtures/feature-extraction/js/fetch/0.hf-inference.js index 34ebc0236c..f4783189f6 100644 --- a/packages/tasks-gen/snippets-fixtures/feature-extraction/js/fetch/0.hf-inference.js +++ b/packages/tasks-gen/snippets-fixtures/feature-extraction/js/fetch/0.hf-inference.js @@ -3,7 +3,7 @@ async function query(data) { "https://router.huggingface.co/hf-inference/models/intfloat/multilingual-e5-large-instruct/pipeline/feature-extraction", { headers: { - Authorization: "Bearer api_token", + Authorization: `Bearer ${process.env.HF_TOKEN}`, "Content-Type": "application/json", }, method: "POST", diff --git a/packages/tasks-gen/snippets-fixtures/feature-extraction/js/huggingface.js/0.hf-inference.js b/packages/tasks-gen/snippets-fixtures/feature-extraction/js/huggingface.js/0.hf-inference.js index a1bd0c7b57..30da6e8e0a 100644 --- a/packages/tasks-gen/snippets-fixtures/feature-extraction/js/huggingface.js/0.hf-inference.js +++ b/packages/tasks-gen/snippets-fixtures/feature-extraction/js/huggingface.js/0.hf-inference.js @@ -1,6 +1,6 @@ import { InferenceClient } from "@huggingface/inference"; -const client = new InferenceClient("api_token"); +const 
client = new InferenceClient(process.env.HF_TOKEN); const output = await client.featureExtraction({ model: "intfloat/multilingual-e5-large-instruct", diff --git a/packages/tasks-gen/snippets-fixtures/feature-extraction/python/huggingface_hub/0.hf-inference.py b/packages/tasks-gen/snippets-fixtures/feature-extraction/python/huggingface_hub/0.hf-inference.py index 221113ce8e..1e13d73914 100644 --- a/packages/tasks-gen/snippets-fixtures/feature-extraction/python/huggingface_hub/0.hf-inference.py +++ b/packages/tasks-gen/snippets-fixtures/feature-extraction/python/huggingface_hub/0.hf-inference.py @@ -1,8 +1,9 @@ +import os from huggingface_hub import InferenceClient client = InferenceClient( provider="hf-inference", - api_key="api_token", + api_key=os.environ["HF_TOKEN"], ) result = client.feature_extraction( diff --git a/packages/tasks-gen/snippets-fixtures/feature-extraction/python/requests/0.hf-inference.py b/packages/tasks-gen/snippets-fixtures/feature-extraction/python/requests/0.hf-inference.py index 91338626ca..8fc0c08ca4 100644 --- a/packages/tasks-gen/snippets-fixtures/feature-extraction/python/requests/0.hf-inference.py +++ b/packages/tasks-gen/snippets-fixtures/feature-extraction/python/requests/0.hf-inference.py @@ -1,8 +1,9 @@ +import os import requests API_URL = "https://router.huggingface.co/hf-inference/models/intfloat/multilingual-e5-large-instruct/pipeline/feature-extraction" headers = { - "Authorization": "Bearer api_token", + "Authorization": f"Bearer {os.environ['HF_TOKEN']}", } def query(payload): diff --git a/packages/tasks-gen/snippets-fixtures/feature-extraction/sh/curl/0.hf-inference.sh b/packages/tasks-gen/snippets-fixtures/feature-extraction/sh/curl/0.hf-inference.sh index 4c3cc578d1..092aa46ecd 100644 --- a/packages/tasks-gen/snippets-fixtures/feature-extraction/sh/curl/0.hf-inference.sh +++ b/packages/tasks-gen/snippets-fixtures/feature-extraction/sh/curl/0.hf-inference.sh @@ -1,6 +1,6 @@ curl 
https://router.huggingface.co/hf-inference/models/intfloat/multilingual-e5-large-instruct/pipeline/feature-extraction \ -X POST \ - -H 'Authorization: Bearer api_token' \ + -H "Authorization: Bearer $HF_TOKEN" \ -H 'Content-Type: application/json' \ -d '{ "inputs": "\"Today is a sunny day and I will get some ice cream.\"" diff --git a/packages/tasks-gen/snippets-fixtures/image-classification/js/fetch/0.hf-inference.js b/packages/tasks-gen/snippets-fixtures/image-classification/js/fetch/0.hf-inference.js index f7cb102cab..aa34f2add8 100644 --- a/packages/tasks-gen/snippets-fixtures/image-classification/js/fetch/0.hf-inference.js +++ b/packages/tasks-gen/snippets-fixtures/image-classification/js/fetch/0.hf-inference.js @@ -3,7 +3,7 @@ async function query(data) { "https://router.huggingface.co/hf-inference/models/Falconsai/nsfw_image_detection", { headers: { - Authorization: "Bearer api_token", + Authorization: `Bearer ${process.env.HF_TOKEN}`, "Content-Type": "image/jpeg", }, method: "POST", diff --git a/packages/tasks-gen/snippets-fixtures/image-classification/js/huggingface.js/0.hf-inference.js b/packages/tasks-gen/snippets-fixtures/image-classification/js/huggingface.js/0.hf-inference.js index d0dad08a13..a9abb0646d 100644 --- a/packages/tasks-gen/snippets-fixtures/image-classification/js/huggingface.js/0.hf-inference.js +++ b/packages/tasks-gen/snippets-fixtures/image-classification/js/huggingface.js/0.hf-inference.js @@ -1,6 +1,6 @@ import { InferenceClient } from "@huggingface/inference"; -const client = new InferenceClient("api_token"); +const client = new InferenceClient(process.env.HF_TOKEN); const data = fs.readFileSync("cats.jpg"); diff --git a/packages/tasks-gen/snippets-fixtures/image-classification/python/huggingface_hub/0.hf-inference.py b/packages/tasks-gen/snippets-fixtures/image-classification/python/huggingface_hub/0.hf-inference.py index 746252516b..b48502d3a0 100644 --- 
a/packages/tasks-gen/snippets-fixtures/image-classification/python/huggingface_hub/0.hf-inference.py +++ b/packages/tasks-gen/snippets-fixtures/image-classification/python/huggingface_hub/0.hf-inference.py @@ -1,8 +1,9 @@ +import os from huggingface_hub import InferenceClient client = InferenceClient( provider="hf-inference", - api_key="api_token", + api_key=os.environ["HF_TOKEN"], ) output = client.image_classification("cats.jpg", model="Falconsai/nsfw_image_detection") \ No newline at end of file diff --git a/packages/tasks-gen/snippets-fixtures/image-classification/python/requests/0.hf-inference.py b/packages/tasks-gen/snippets-fixtures/image-classification/python/requests/0.hf-inference.py index 59ddb6fa1b..e0d9ac54ee 100644 --- a/packages/tasks-gen/snippets-fixtures/image-classification/python/requests/0.hf-inference.py +++ b/packages/tasks-gen/snippets-fixtures/image-classification/python/requests/0.hf-inference.py @@ -1,8 +1,9 @@ +import os import requests API_URL = "https://router.huggingface.co/hf-inference/models/Falconsai/nsfw_image_detection" headers = { - "Authorization": "Bearer api_token", + "Authorization": f"Bearer {os.environ['HF_TOKEN']}", } def query(filename): diff --git a/packages/tasks-gen/snippets-fixtures/image-classification/sh/curl/0.hf-inference.sh b/packages/tasks-gen/snippets-fixtures/image-classification/sh/curl/0.hf-inference.sh index d2b438ffd8..57cb83f10d 100644 --- a/packages/tasks-gen/snippets-fixtures/image-classification/sh/curl/0.hf-inference.sh +++ b/packages/tasks-gen/snippets-fixtures/image-classification/sh/curl/0.hf-inference.sh @@ -1,5 +1,5 @@ curl https://router.huggingface.co/hf-inference/models/Falconsai/nsfw_image_detection \ -X POST \ - -H 'Authorization: Bearer api_token' \ + -H "Authorization: Bearer $HF_TOKEN" \ -H 'Content-Type: image/jpeg' \ --data-binary @"cats.jpg" \ No newline at end of file diff --git a/packages/tasks-gen/snippets-fixtures/image-to-image/python/huggingface_hub/0.hf-inference.py 
b/packages/tasks-gen/snippets-fixtures/image-to-image/python/huggingface_hub/0.hf-inference.py index dc89746110..1f94f838a8 100644 --- a/packages/tasks-gen/snippets-fixtures/image-to-image/python/huggingface_hub/0.hf-inference.py +++ b/packages/tasks-gen/snippets-fixtures/image-to-image/python/huggingface_hub/0.hf-inference.py @@ -1,8 +1,9 @@ +import os from huggingface_hub import InferenceClient client = InferenceClient( provider="hf-inference", - api_key="api_token", + api_key=os.environ["HF_TOKEN"], ) # output is a PIL.Image object diff --git a/packages/tasks-gen/snippets-fixtures/image-to-image/python/requests/0.hf-inference.py b/packages/tasks-gen/snippets-fixtures/image-to-image/python/requests/0.hf-inference.py index de58f31953..eb9e981c6e 100644 --- a/packages/tasks-gen/snippets-fixtures/image-to-image/python/requests/0.hf-inference.py +++ b/packages/tasks-gen/snippets-fixtures/image-to-image/python/requests/0.hf-inference.py @@ -1,9 +1,10 @@ +import os import base64 import requests API_URL = "https://router.huggingface.co/hf-inference/models/stabilityai/stable-diffusion-xl-refiner-1.0" headers = { - "Authorization": "Bearer api_token", + "Authorization": f"Bearer {os.environ['HF_TOKEN']}", } def query(payload): diff --git a/packages/tasks-gen/snippets-fixtures/question-answering/python/huggingface_hub/0.hf-inference.py b/packages/tasks-gen/snippets-fixtures/question-answering/python/huggingface_hub/0.hf-inference.py index d3d4288e7e..e1d6396474 100644 --- a/packages/tasks-gen/snippets-fixtures/question-answering/python/huggingface_hub/0.hf-inference.py +++ b/packages/tasks-gen/snippets-fixtures/question-answering/python/huggingface_hub/0.hf-inference.py @@ -1,8 +1,9 @@ +import os from huggingface_hub import InferenceClient client = InferenceClient( provider="hf-inference", - api_key="api_token", + api_key=os.environ["HF_TOKEN"], ) answer = client.question_answering( diff --git 
a/packages/tasks-gen/snippets-fixtures/table-question-answering/python/huggingface_hub/0.hf-inference.py b/packages/tasks-gen/snippets-fixtures/table-question-answering/python/huggingface_hub/0.hf-inference.py index fb9c5d53d3..7bea56b197 100644 --- a/packages/tasks-gen/snippets-fixtures/table-question-answering/python/huggingface_hub/0.hf-inference.py +++ b/packages/tasks-gen/snippets-fixtures/table-question-answering/python/huggingface_hub/0.hf-inference.py @@ -1,8 +1,9 @@ +import os from huggingface_hub import InferenceClient client = InferenceClient( provider="hf-inference", - api_key="api_token", + api_key=os.environ["HF_TOKEN"], ) answer = client.question_answering( diff --git a/packages/tasks-gen/snippets-fixtures/tabular/python/requests/0.hf-inference.py b/packages/tasks-gen/snippets-fixtures/tabular/python/requests/0.hf-inference.py index 39322c87cd..4fa43785ec 100644 --- a/packages/tasks-gen/snippets-fixtures/tabular/python/requests/0.hf-inference.py +++ b/packages/tasks-gen/snippets-fixtures/tabular/python/requests/0.hf-inference.py @@ -1,8 +1,9 @@ +import os import requests API_URL = "https://router.huggingface.co/hf-inference/models/templates/tabular-classification" headers = { - "Authorization": "Bearer api_token", + "Authorization": f"Bearer {os.environ['HF_TOKEN']}", } def query(payload): diff --git a/packages/tasks-gen/snippets-fixtures/text-classification/js/fetch/0.hf-inference.js b/packages/tasks-gen/snippets-fixtures/text-classification/js/fetch/0.hf-inference.js index bc400258cd..727bd05971 100644 --- a/packages/tasks-gen/snippets-fixtures/text-classification/js/fetch/0.hf-inference.js +++ b/packages/tasks-gen/snippets-fixtures/text-classification/js/fetch/0.hf-inference.js @@ -3,7 +3,7 @@ async function query(data) { "https://router.huggingface.co/hf-inference/models/distilbert/distilbert-base-uncased-finetuned-sst-2-english", { headers: { - Authorization: "Bearer api_token", + Authorization: `Bearer ${process.env.HF_TOKEN}`, "Content-Type": 
"application/json", }, method: "POST", diff --git a/packages/tasks-gen/snippets-fixtures/text-classification/js/huggingface.js/0.hf-inference.js b/packages/tasks-gen/snippets-fixtures/text-classification/js/huggingface.js/0.hf-inference.js index b3573afd30..59c440a6b1 100644 --- a/packages/tasks-gen/snippets-fixtures/text-classification/js/huggingface.js/0.hf-inference.js +++ b/packages/tasks-gen/snippets-fixtures/text-classification/js/huggingface.js/0.hf-inference.js @@ -1,6 +1,6 @@ import { InferenceClient } from "@huggingface/inference"; -const client = new InferenceClient("api_token"); +const client = new InferenceClient(process.env.HF_TOKEN); const output = await client.textClassification({ model: "distilbert/distilbert-base-uncased-finetuned-sst-2-english", diff --git a/packages/tasks-gen/snippets-fixtures/text-classification/python/huggingface_hub/0.hf-inference.py b/packages/tasks-gen/snippets-fixtures/text-classification/python/huggingface_hub/0.hf-inference.py index 2eb859c289..98f7058a89 100644 --- a/packages/tasks-gen/snippets-fixtures/text-classification/python/huggingface_hub/0.hf-inference.py +++ b/packages/tasks-gen/snippets-fixtures/text-classification/python/huggingface_hub/0.hf-inference.py @@ -1,8 +1,9 @@ +import os from huggingface_hub import InferenceClient client = InferenceClient( provider="hf-inference", - api_key="api_token", + api_key=os.environ["HF_TOKEN"], ) result = client.text_classification( diff --git a/packages/tasks-gen/snippets-fixtures/text-classification/python/requests/0.hf-inference.py b/packages/tasks-gen/snippets-fixtures/text-classification/python/requests/0.hf-inference.py index a4f3d0a110..411244b399 100644 --- a/packages/tasks-gen/snippets-fixtures/text-classification/python/requests/0.hf-inference.py +++ b/packages/tasks-gen/snippets-fixtures/text-classification/python/requests/0.hf-inference.py @@ -1,8 +1,9 @@ +import os import requests API_URL = 
"https://router.huggingface.co/hf-inference/models/distilbert/distilbert-base-uncased-finetuned-sst-2-english" headers = { - "Authorization": "Bearer api_token", + "Authorization": f"Bearer {os.environ['HF_TOKEN']}", } def query(payload): diff --git a/packages/tasks-gen/snippets-fixtures/text-classification/sh/curl/0.hf-inference.sh b/packages/tasks-gen/snippets-fixtures/text-classification/sh/curl/0.hf-inference.sh index 6f2460d330..39fbd62c2f 100644 --- a/packages/tasks-gen/snippets-fixtures/text-classification/sh/curl/0.hf-inference.sh +++ b/packages/tasks-gen/snippets-fixtures/text-classification/sh/curl/0.hf-inference.sh @@ -1,6 +1,6 @@ curl https://router.huggingface.co/hf-inference/models/distilbert/distilbert-base-uncased-finetuned-sst-2-english \ -X POST \ - -H 'Authorization: Bearer api_token' \ + -H "Authorization: Bearer $HF_TOKEN" \ -H 'Content-Type: application/json' \ -d '{ "inputs": "\"I like you. I love you\"" diff --git a/packages/tasks-gen/snippets-fixtures/text-to-audio-transformers/js/fetch/0.hf-inference.js b/packages/tasks-gen/snippets-fixtures/text-to-audio-transformers/js/fetch/0.hf-inference.js index 4b9aadea01..4d22a3a637 100644 --- a/packages/tasks-gen/snippets-fixtures/text-to-audio-transformers/js/fetch/0.hf-inference.js +++ b/packages/tasks-gen/snippets-fixtures/text-to-audio-transformers/js/fetch/0.hf-inference.js @@ -3,7 +3,7 @@ async function query(data) { "https://router.huggingface.co/hf-inference/models/facebook/musicgen-small", { headers: { - Authorization: "Bearer api_token", + Authorization: `Bearer ${process.env.HF_TOKEN}`, "Content-Type": "application/json", }, method: "POST", diff --git a/packages/tasks-gen/snippets-fixtures/text-to-audio-transformers/python/requests/0.hf-inference.py b/packages/tasks-gen/snippets-fixtures/text-to-audio-transformers/python/requests/0.hf-inference.py index 7fdba6fc9c..ff877beaa5 100644 --- a/packages/tasks-gen/snippets-fixtures/text-to-audio-transformers/python/requests/0.hf-inference.py 
+++ b/packages/tasks-gen/snippets-fixtures/text-to-audio-transformers/python/requests/0.hf-inference.py @@ -1,8 +1,9 @@ +import os import requests API_URL = "https://router.huggingface.co/hf-inference/models/facebook/musicgen-small" headers = { - "Authorization": "Bearer api_token", + "Authorization": f"Bearer {os.environ['HF_TOKEN']}", } def query(payload): diff --git a/packages/tasks-gen/snippets-fixtures/text-to-image--lora/js/fetch/0.fal-ai.js b/packages/tasks-gen/snippets-fixtures/text-to-image--lora/js/fetch/0.fal-ai.js index 6fe4d626cf..a2d25b25de 100644 --- a/packages/tasks-gen/snippets-fixtures/text-to-image--lora/js/fetch/0.fal-ai.js +++ b/packages/tasks-gen/snippets-fixtures/text-to-image--lora/js/fetch/0.fal-ai.js @@ -3,7 +3,7 @@ async function query(data) { "https://fal.run/", { headers: { - Authorization: "Key api_token", + Authorization: `Key ${process.env.FAL_AI_API_KEY}`, "Content-Type": "application/json", }, method: "POST", diff --git a/packages/tasks-gen/snippets-fixtures/text-to-image--lora/js/huggingface.js/0.fal-ai.js b/packages/tasks-gen/snippets-fixtures/text-to-image--lora/js/huggingface.js/0.fal-ai.js index f3edef9840..e07300f4ad 100644 --- a/packages/tasks-gen/snippets-fixtures/text-to-image--lora/js/huggingface.js/0.fal-ai.js +++ b/packages/tasks-gen/snippets-fixtures/text-to-image--lora/js/huggingface.js/0.fal-ai.js @@ -1,6 +1,6 @@ import { InferenceClient } from "@huggingface/inference"; -const client = new InferenceClient("api_token"); +const client = new InferenceClient(process.env.HF_TOKEN); const image = await client.textToImage({ provider: "fal-ai", diff --git a/packages/tasks-gen/snippets-fixtures/text-to-image--lora/python/huggingface_hub/0.fal-ai.py b/packages/tasks-gen/snippets-fixtures/text-to-image--lora/python/huggingface_hub/0.fal-ai.py index d7b3606fb5..f1a80ac0aa 100644 --- a/packages/tasks-gen/snippets-fixtures/text-to-image--lora/python/huggingface_hub/0.fal-ai.py +++ 
b/packages/tasks-gen/snippets-fixtures/text-to-image--lora/python/huggingface_hub/0.fal-ai.py @@ -1,8 +1,9 @@ +import os from huggingface_hub import InferenceClient client = InferenceClient( provider="fal-ai", - api_key="api_token", + api_key=os.environ["HF_TOKEN"], ) # output is a PIL.Image object diff --git a/packages/tasks-gen/snippets-fixtures/text-to-image/js/fetch/0.fal-ai.js b/packages/tasks-gen/snippets-fixtures/text-to-image/js/fetch/0.fal-ai.js index 349941d7dc..1b8d65c886 100644 --- a/packages/tasks-gen/snippets-fixtures/text-to-image/js/fetch/0.fal-ai.js +++ b/packages/tasks-gen/snippets-fixtures/text-to-image/js/fetch/0.fal-ai.js @@ -3,7 +3,7 @@ async function query(data) { "https://fal.run/", { headers: { - Authorization: "Key api_token", + Authorization: `Key ${process.env.FAL_AI_API_KEY}`, "Content-Type": "application/json", }, method: "POST", diff --git a/packages/tasks-gen/snippets-fixtures/text-to-image/js/fetch/0.hf-inference.js b/packages/tasks-gen/snippets-fixtures/text-to-image/js/fetch/0.hf-inference.js index 786f87e6fb..31488af8d7 100644 --- a/packages/tasks-gen/snippets-fixtures/text-to-image/js/fetch/0.hf-inference.js +++ b/packages/tasks-gen/snippets-fixtures/text-to-image/js/fetch/0.hf-inference.js @@ -3,7 +3,7 @@ async function query(data) { "https://router.huggingface.co/hf-inference/models/black-forest-labs/FLUX.1-schnell", { headers: { - Authorization: "Bearer api_token", + Authorization: `Bearer ${process.env.HF_TOKEN}`, "Content-Type": "application/json", }, method: "POST", diff --git a/packages/tasks-gen/snippets-fixtures/text-to-image/js/huggingface.js/0.fal-ai.js b/packages/tasks-gen/snippets-fixtures/text-to-image/js/huggingface.js/0.fal-ai.js index 6aad319667..df248926ab 100644 --- a/packages/tasks-gen/snippets-fixtures/text-to-image/js/huggingface.js/0.fal-ai.js +++ b/packages/tasks-gen/snippets-fixtures/text-to-image/js/huggingface.js/0.fal-ai.js @@ -1,6 +1,6 @@ import { InferenceClient } from "@huggingface/inference"; 
-const client = new InferenceClient("api_token"); +const client = new InferenceClient(process.env.HF_TOKEN); const image = await client.textToImage({ provider: "fal-ai", diff --git a/packages/tasks-gen/snippets-fixtures/text-to-image/js/huggingface.js/0.hf-inference.js b/packages/tasks-gen/snippets-fixtures/text-to-image/js/huggingface.js/0.hf-inference.js index 5eb6a27e6b..2d904c8d24 100644 --- a/packages/tasks-gen/snippets-fixtures/text-to-image/js/huggingface.js/0.hf-inference.js +++ b/packages/tasks-gen/snippets-fixtures/text-to-image/js/huggingface.js/0.hf-inference.js @@ -1,6 +1,6 @@ import { InferenceClient } from "@huggingface/inference"; -const client = new InferenceClient("api_token"); +const client = new InferenceClient(process.env.HF_TOKEN); const image = await client.textToImage({ provider: "hf-inference", diff --git a/packages/tasks-gen/snippets-fixtures/text-to-image/python/huggingface_hub/0.fal-ai.py b/packages/tasks-gen/snippets-fixtures/text-to-image/python/huggingface_hub/0.fal-ai.py index f70ff34f3a..3b4d453806 100644 --- a/packages/tasks-gen/snippets-fixtures/text-to-image/python/huggingface_hub/0.fal-ai.py +++ b/packages/tasks-gen/snippets-fixtures/text-to-image/python/huggingface_hub/0.fal-ai.py @@ -1,8 +1,9 @@ +import os from huggingface_hub import InferenceClient client = InferenceClient( provider="fal-ai", - api_key="api_token", + api_key=os.environ["HF_TOKEN"], ) # output is a PIL.Image object diff --git a/packages/tasks-gen/snippets-fixtures/text-to-image/python/huggingface_hub/0.hf-inference.py b/packages/tasks-gen/snippets-fixtures/text-to-image/python/huggingface_hub/0.hf-inference.py index 84ce49c29c..1ab5f80ecc 100644 --- a/packages/tasks-gen/snippets-fixtures/text-to-image/python/huggingface_hub/0.hf-inference.py +++ b/packages/tasks-gen/snippets-fixtures/text-to-image/python/huggingface_hub/0.hf-inference.py @@ -1,8 +1,9 @@ +import os from huggingface_hub import InferenceClient client = InferenceClient( provider="hf-inference", - 
api_key="api_token", + api_key=os.environ["HF_TOKEN"], ) # output is a PIL.Image object diff --git a/packages/tasks-gen/snippets-fixtures/text-to-image/python/requests/0.hf-inference.py b/packages/tasks-gen/snippets-fixtures/text-to-image/python/requests/0.hf-inference.py index 97ca138dc1..fe7315fa42 100644 --- a/packages/tasks-gen/snippets-fixtures/text-to-image/python/requests/0.hf-inference.py +++ b/packages/tasks-gen/snippets-fixtures/text-to-image/python/requests/0.hf-inference.py @@ -1,8 +1,9 @@ +import os import requests API_URL = "https://router.huggingface.co/hf-inference/models/black-forest-labs/FLUX.1-schnell" headers = { - "Authorization": "Bearer api_token", + "Authorization": f"Bearer {os.environ['HF_TOKEN']}", } def query(payload): diff --git a/packages/tasks-gen/snippets-fixtures/text-to-speech/js/fetch/0.fal-ai.js b/packages/tasks-gen/snippets-fixtures/text-to-speech/js/fetch/0.fal-ai.js index 604f5367b9..e6ae2e3bab 100644 --- a/packages/tasks-gen/snippets-fixtures/text-to-speech/js/fetch/0.fal-ai.js +++ b/packages/tasks-gen/snippets-fixtures/text-to-speech/js/fetch/0.fal-ai.js @@ -3,7 +3,7 @@ async function query(data) { "https://fal.run/", { headers: { - Authorization: "Key api_token", + Authorization: `Key ${process.env.FAL_AI_API_KEY}`, "Content-Type": "application/json", }, method: "POST", diff --git a/packages/tasks-gen/snippets-fixtures/text-to-speech/js/huggingface.js/0.fal-ai.js b/packages/tasks-gen/snippets-fixtures/text-to-speech/js/huggingface.js/0.fal-ai.js index 2b73c4f0a4..6e4bc90576 100644 --- a/packages/tasks-gen/snippets-fixtures/text-to-speech/js/huggingface.js/0.fal-ai.js +++ b/packages/tasks-gen/snippets-fixtures/text-to-speech/js/huggingface.js/0.fal-ai.js @@ -1,6 +1,6 @@ import { InferenceClient } from "@huggingface/inference"; -const client = new InferenceClient("api_token"); +const client = new InferenceClient(process.env.HF_TOKEN); const audio = await client.textToSpeech({ provider: "fal-ai", diff --git 
a/packages/tasks-gen/snippets-fixtures/text-to-speech/python/huggingface_hub/0.fal-ai.py b/packages/tasks-gen/snippets-fixtures/text-to-speech/python/huggingface_hub/0.fal-ai.py index 206d82914d..287a4b3880 100644 --- a/packages/tasks-gen/snippets-fixtures/text-to-speech/python/huggingface_hub/0.fal-ai.py +++ b/packages/tasks-gen/snippets-fixtures/text-to-speech/python/huggingface_hub/0.fal-ai.py @@ -1,8 +1,9 @@ +import os from huggingface_hub import InferenceClient client = InferenceClient( provider="fal-ai", - api_key="api_token", + api_key=os.environ["HF_TOKEN"], ) # audio is returned as bytes diff --git a/packages/tasks-gen/snippets-fixtures/text-to-speech/python/requests/0.fal-ai.py b/packages/tasks-gen/snippets-fixtures/text-to-speech/python/requests/0.fal-ai.py index 39cf5a672a..25078707da 100644 --- a/packages/tasks-gen/snippets-fixtures/text-to-speech/python/requests/0.fal-ai.py +++ b/packages/tasks-gen/snippets-fixtures/text-to-speech/python/requests/0.fal-ai.py @@ -1,8 +1,9 @@ +import os import requests API_URL = "https://fal.run/" headers = { - "Authorization": "Key api_token", + "Authorization": f"Key {os.environ['FAL_AI_API_KEY']}", } def query(payload): diff --git a/packages/tasks-gen/snippets-fixtures/text-to-video/js/huggingface.js/0.fal-ai.js b/packages/tasks-gen/snippets-fixtures/text-to-video/js/huggingface.js/0.fal-ai.js index 9d0b46ce27..c01c0cead6 100644 --- a/packages/tasks-gen/snippets-fixtures/text-to-video/js/huggingface.js/0.fal-ai.js +++ b/packages/tasks-gen/snippets-fixtures/text-to-video/js/huggingface.js/0.fal-ai.js @@ -1,6 +1,6 @@ import { InferenceClient } from "@huggingface/inference"; -const client = new InferenceClient("api_token"); +const client = new InferenceClient(process.env.HF_TOKEN); const video = await client.textToVideo({ provider: "fal-ai", diff --git a/packages/tasks-gen/snippets-fixtures/text-to-video/js/huggingface.js/0.replicate.js 
b/packages/tasks-gen/snippets-fixtures/text-to-video/js/huggingface.js/0.replicate.js index 43b7f4de64..4a59386ae2 100644 --- a/packages/tasks-gen/snippets-fixtures/text-to-video/js/huggingface.js/0.replicate.js +++ b/packages/tasks-gen/snippets-fixtures/text-to-video/js/huggingface.js/0.replicate.js @@ -1,6 +1,6 @@ import { InferenceClient } from "@huggingface/inference"; -const client = new InferenceClient("api_token"); +const client = new InferenceClient(process.env.HF_TOKEN); const video = await client.textToVideo({ provider: "replicate", diff --git a/packages/tasks-gen/snippets-fixtures/text-to-video/python/huggingface_hub/0.fal-ai.py b/packages/tasks-gen/snippets-fixtures/text-to-video/python/huggingface_hub/0.fal-ai.py index 240c59184d..d9925fb097 100644 --- a/packages/tasks-gen/snippets-fixtures/text-to-video/python/huggingface_hub/0.fal-ai.py +++ b/packages/tasks-gen/snippets-fixtures/text-to-video/python/huggingface_hub/0.fal-ai.py @@ -1,8 +1,9 @@ +import os from huggingface_hub import InferenceClient client = InferenceClient( provider="fal-ai", - api_key="api_token", + api_key=os.environ["HF_TOKEN"], ) video = client.text_to_video( diff --git a/packages/tasks-gen/snippets-fixtures/text-to-video/python/huggingface_hub/0.replicate.py b/packages/tasks-gen/snippets-fixtures/text-to-video/python/huggingface_hub/0.replicate.py index 6299fe51d5..dc05628c93 100644 --- a/packages/tasks-gen/snippets-fixtures/text-to-video/python/huggingface_hub/0.replicate.py +++ b/packages/tasks-gen/snippets-fixtures/text-to-video/python/huggingface_hub/0.replicate.py @@ -1,8 +1,9 @@ +import os from huggingface_hub import InferenceClient client = InferenceClient( provider="replicate", - api_key="api_token", + api_key=os.environ["HF_TOKEN"], ) video = client.text_to_video( diff --git a/packages/tasks-gen/snippets-fixtures/with-access-token/js/huggingface.js/0.hf-inference.js b/packages/tasks-gen/snippets-fixtures/with-access-token/js/huggingface.js/0.hf-inference.js new file mode 
100644 index 0000000000..4ec1ceb3fd --- /dev/null +++ b/packages/tasks-gen/snippets-fixtures/with-access-token/js/huggingface.js/0.hf-inference.js @@ -0,0 +1,16 @@ +import { InferenceClient } from "@huggingface/inference"; + +const client = new InferenceClient("hf_xxx"); + +const chatCompletion = await client.chatCompletion({ + provider: "hf-inference", + model: "meta-llama/Llama-3.1-8B-Instruct", + messages: [ + { + role: "user", + content: "What is the capital of France?", + }, + ], +}); + +console.log(chatCompletion.choices[0].message); \ No newline at end of file diff --git a/packages/tasks-gen/snippets-fixtures/with-access-token/js/openai/0.hf-inference.js b/packages/tasks-gen/snippets-fixtures/with-access-token/js/openai/0.hf-inference.js new file mode 100644 index 0000000000..163bea7a27 --- /dev/null +++ b/packages/tasks-gen/snippets-fixtures/with-access-token/js/openai/0.hf-inference.js @@ -0,0 +1,18 @@ +import { OpenAI } from "openai"; + +const client = new OpenAI({ + baseURL: "https://router.huggingface.co/hf-inference/models/meta-llama/Llama-3.1-8B-Instruct/v1", + apiKey: "hf_xxx", +}); + +const chatCompletion = await client.chat.completions.create({ + model: "meta-llama/Llama-3.1-8B-Instruct", + messages: [ + { + role: "user", + content: "What is the capital of France?", + }, + ], +}); + +console.log(chatCompletion.choices[0].message); \ No newline at end of file diff --git a/packages/tasks-gen/snippets-fixtures/with-access-token/python/huggingface_hub/0.hf-inference.py b/packages/tasks-gen/snippets-fixtures/with-access-token/python/huggingface_hub/0.hf-inference.py new file mode 100644 index 0000000000..4fab3f920a --- /dev/null +++ b/packages/tasks-gen/snippets-fixtures/with-access-token/python/huggingface_hub/0.hf-inference.py @@ -0,0 +1,18 @@ +from huggingface_hub import InferenceClient + +client = InferenceClient( + provider="hf-inference", + api_key="hf_xxx", +) + +completion = client.chat.completions.create( + 
model="meta-llama/Llama-3.1-8B-Instruct", + messages=[ + { + "role": "user", + "content": "What is the capital of France?" + } + ], +) + +print(completion.choices[0].message) \ No newline at end of file diff --git a/packages/tasks-gen/snippets-fixtures/with-access-token/python/openai/0.hf-inference.py b/packages/tasks-gen/snippets-fixtures/with-access-token/python/openai/0.hf-inference.py new file mode 100644 index 0000000000..7545c575b4 --- /dev/null +++ b/packages/tasks-gen/snippets-fixtures/with-access-token/python/openai/0.hf-inference.py @@ -0,0 +1,18 @@ +from openai import OpenAI + +client = OpenAI( + base_url="https://router.huggingface.co/hf-inference/models/meta-llama/Llama-3.1-8B-Instruct/v1", + api_key="hf_xxx", +) + +completion = client.chat.completions.create( + model="meta-llama/Llama-3.1-8B-Instruct", + messages=[ + { + "role": "user", + "content": "What is the capital of France?" + } + ], +) + +print(completion.choices[0].message) \ No newline at end of file diff --git a/packages/tasks-gen/snippets-fixtures/with-access-token/python/requests/0.hf-inference.py b/packages/tasks-gen/snippets-fixtures/with-access-token/python/requests/0.hf-inference.py new file mode 100644 index 0000000000..a48fffb423 --- /dev/null +++ b/packages/tasks-gen/snippets-fixtures/with-access-token/python/requests/0.hf-inference.py @@ -0,0 +1,22 @@ +import requests + +API_URL = "https://router.huggingface.co/hf-inference/models/meta-llama/Llama-3.1-8B-Instruct/v1/chat/completions" +headers = { + "Authorization": "Bearer hf_xxx", +} + +def query(payload): + response = requests.post(API_URL, headers=headers, json=payload) + return response.json() + +response = query({ + "messages": [ + { + "role": "user", + "content": "What is the capital of France?" 
+ } + ], + "model": "meta-llama/Llama-3.1-8B-Instruct" +}) + +print(response["choices"][0]["message"]) \ No newline at end of file diff --git a/packages/tasks-gen/snippets-fixtures/with-access-token/sh/curl/0.hf-inference.sh b/packages/tasks-gen/snippets-fixtures/with-access-token/sh/curl/0.hf-inference.sh new file mode 100644 index 0000000000..e45e5bc7c2 --- /dev/null +++ b/packages/tasks-gen/snippets-fixtures/with-access-token/sh/curl/0.hf-inference.sh @@ -0,0 +1,13 @@ +curl https://router.huggingface.co/hf-inference/models/meta-llama/Llama-3.1-8B-Instruct/v1/chat/completions \ + -H 'Authorization: Bearer hf_xxx' \ + -H 'Content-Type: application/json' \ + -d '{ + "messages": [ + { + "role": "user", + "content": "What is the capital of France?" + } + ], + "model": "meta-llama/Llama-3.1-8B-Instruct", + "stream": false + }' \ No newline at end of file diff --git a/packages/tasks-gen/snippets-fixtures/zero-shot-classification/js/fetch/0.hf-inference.js b/packages/tasks-gen/snippets-fixtures/zero-shot-classification/js/fetch/0.hf-inference.js index 1cd4606452..61c575bd33 100644 --- a/packages/tasks-gen/snippets-fixtures/zero-shot-classification/js/fetch/0.hf-inference.js +++ b/packages/tasks-gen/snippets-fixtures/zero-shot-classification/js/fetch/0.hf-inference.js @@ -3,7 +3,7 @@ async function query(data) { "https://router.huggingface.co/hf-inference/models/facebook/bart-large-mnli", { headers: { - Authorization: "Bearer api_token", + Authorization: `Bearer ${process.env.HF_TOKEN}`, "Content-Type": "application/json", }, method: "POST", diff --git a/packages/tasks-gen/snippets-fixtures/zero-shot-classification/python/requests/0.hf-inference.py b/packages/tasks-gen/snippets-fixtures/zero-shot-classification/python/requests/0.hf-inference.py index 6211303ac4..258773e0ae 100644 --- a/packages/tasks-gen/snippets-fixtures/zero-shot-classification/python/requests/0.hf-inference.py +++ 
b/packages/tasks-gen/snippets-fixtures/zero-shot-classification/python/requests/0.hf-inference.py @@ -1,8 +1,9 @@ +import os import requests API_URL = "https://router.huggingface.co/hf-inference/models/facebook/bart-large-mnli" headers = { - "Authorization": "Bearer api_token", + "Authorization": f"Bearer {os.environ['HF_TOKEN']}", } def query(payload): diff --git a/packages/tasks-gen/snippets-fixtures/zero-shot-classification/sh/curl/0.hf-inference.sh b/packages/tasks-gen/snippets-fixtures/zero-shot-classification/sh/curl/0.hf-inference.sh index bd1024f3d3..c8c0237e84 100644 --- a/packages/tasks-gen/snippets-fixtures/zero-shot-classification/sh/curl/0.hf-inference.sh +++ b/packages/tasks-gen/snippets-fixtures/zero-shot-classification/sh/curl/0.hf-inference.sh @@ -2,4 +2,4 @@ curl https://router.huggingface.co/hf-inference/models/facebook/bart-large-mnli -X POST \ -d '{"inputs": "Hi, I recently bought a device from your company but it is not working as advertised and I would like to get reimbursed!", "parameters": {"candidate_labels": ["refund", "legal", "faq"]}}' \ -H 'Content-Type: application/json' \ - -H 'Authorization: Bearer api_token' \ No newline at end of file + -H "Authorization: Bearer $HF_TOKEN" \ No newline at end of file diff --git a/packages/tasks-gen/snippets-fixtures/zero-shot-image-classification/python/requests/0.hf-inference.py b/packages/tasks-gen/snippets-fixtures/zero-shot-image-classification/python/requests/0.hf-inference.py index 4755a924fd..4d47016895 100644 --- a/packages/tasks-gen/snippets-fixtures/zero-shot-image-classification/python/requests/0.hf-inference.py +++ b/packages/tasks-gen/snippets-fixtures/zero-shot-image-classification/python/requests/0.hf-inference.py @@ -1,9 +1,10 @@ +import os import base64 import requests API_URL = "https://router.huggingface.co/hf-inference/models/openai/clip-vit-large-patch14" headers = { - "Authorization": "Bearer api_token", + "Authorization": f"Bearer {os.environ['HF_TOKEN']}", } def 
query(data): diff --git a/packages/tasks/src/model-libraries-snippets.ts b/packages/tasks/src/model-libraries-snippets.ts index d8f80ef998..798fb8358b 100644 --- a/packages/tasks/src/model-libraries-snippets.ts +++ b/packages/tasks/src/model-libraries-snippets.ts @@ -115,7 +115,7 @@ export const bm25s = (model: ModelData): string[] => [ retriever = BM25HF.load_from_hub("${model.id}")`, ]; -export const chatterbox = (model: ModelData): string[] => [ +export const chatterbox = (): string[] => [ `# pip install chatterbox-tts import torchaudio as ta from chatterbox.tts import ChatterboxTTS