Skip to content

Commit 095abe6

Browse files
committed
[InferenceSnippets] Fix HF_TOKEN not used if https:// in example
1 parent 76770d7 commit 095abe6

File tree

5 files changed

+5
-5
lines changed

5 files changed

+5
-5
lines changed

packages/inference/src/snippets/getInferenceSnippets.ts

Lines changed: 1 addition & 1 deletion

@@ -466,7 +466,7 @@ function replaceAccessTokenPlaceholder(
 		!endpointUrl && // custom endpointUrl => use a generic API_TOKEN
 		(provider == "hf-inference" || // hf-inference provider => use $HF_TOKEN
 			(!directRequest && // if explicit directRequest => use provider-specific token
-				(!snippet.includes("https://") || // no URL provided => using a client => use $HF_TOKEN
+				(snippet.includes("InferenceClient") || // using a client => use $HF_TOKEN
 					snippet.includes("https://router.huggingface.co")))); // explicit routed request => use $HF_TOKEN
 	const accessTokenEnvVar = useHfToken
 		? "HF_TOKEN" // e.g. routed request or hf-inference

packages/tasks-gen/snippets-fixtures/conversational-vlm-non-stream/js/huggingface.js/0.fireworks-ai.js

Lines changed: 1 addition & 1 deletion

@@ -1,6 +1,6 @@
 import { InferenceClient } from "@huggingface/inference";
 
-const client = new InferenceClient(process.env.FIREWORKS_AI_API_KEY);
+const client = new InferenceClient(process.env.HF_TOKEN);
 
 const chatCompletion = await client.chatCompletion({
     provider: "fireworks-ai",

packages/tasks-gen/snippets-fixtures/conversational-vlm-non-stream/python/huggingface_hub/0.fireworks-ai.py

Lines changed: 1 addition & 1 deletion

@@ -3,7 +3,7 @@
 
 client = InferenceClient(
     provider="fireworks-ai",
-    api_key=os.environ["FIREWORKS_AI_API_KEY"],
+    api_key=os.environ["HF_TOKEN"],
 )
 
 completion = client.chat.completions.create(

packages/tasks-gen/snippets-fixtures/conversational-vlm-stream/js/huggingface.js/0.fireworks-ai.js

Lines changed: 1 addition & 1 deletion

@@ -1,6 +1,6 @@
 import { InferenceClient } from "@huggingface/inference";
 
-const client = new InferenceClient(process.env.FIREWORKS_AI_API_KEY);
+const client = new InferenceClient(process.env.HF_TOKEN);
 
 let out = "";

packages/tasks-gen/snippets-fixtures/conversational-vlm-stream/python/huggingface_hub/0.fireworks-ai.py

Lines changed: 1 addition & 1 deletion

@@ -3,7 +3,7 @@
 
 client = InferenceClient(
     provider="fireworks-ai",
-    api_key=os.environ["FIREWORKS_AI_API_KEY"],
+    api_key=os.environ["HF_TOKEN"],
 )
 
 stream = client.chat.completions.create(

0 commit comments

Comments (0)