Skip to content

Commit 6e65421

Browse files
authored
Do not require all dummy api keys in inference tests (#1226)
I currently find it annoying, when I want to run tests locally (against already-cached tapes), to have to set the env variables to `"dummy"`: ```sh HF_FAL_KEY=dummy pnpm run test -t "Fal AI" ``` This PR is a suggestion to get rid of that and default to `"dummy"` in the code instead of in the CI workflow file. Happy to hear better suggestions if you can think of one (in Python I would write an automatic "get_api_key_or_dummy" fixture, but that is quite specific to pytest). As a bonus, there is no need to update `.github/workflows/test.yml` when adding a new provider. WDYT?
1 parent 2027973 commit 6e65421

File tree

2 files changed

+20
-50
lines changed

2 files changed

+20
-50
lines changed

.github/workflows/test.yml

Lines changed: 0 additions & 30 deletions
Original file line numberDiff line numberDiff line change
@@ -41,16 +41,6 @@ jobs:
4141
run: VCR_MODE=playback pnpm --filter ...[${{ steps.since.outputs.SINCE }}] test
4242
env:
4343
HF_TOKEN: ${{ secrets.HF_TOKEN }}
44-
HF_BLACK_FOREST_LABS_KEY: dummy
45-
HF_COHERE_KEY: dummy
46-
HF_FAL_KEY: dummy
47-
HF_FIREWORKS_KEY: dummy
48-
HF_HYPERBOLIC_KEY: dummy
49-
HF_NEBIUS_KEY: dummy
50-
HF_NOVITA_KEY: dummy
51-
HF_REPLICATE_KEY: dummy
52-
HF_SAMBANOVA_KEY: dummy
53-
HF_TOGETHER_KEY: dummy
5444

5545
browser:
5646
runs-on: ubuntu-latest
@@ -87,16 +77,6 @@ jobs:
8777
run: VCR_MODE=playback pnpm --filter ...[${{ steps.since.outputs.SINCE }}] test:browser
8878
env:
8979
HF_TOKEN: ${{ secrets.HF_TOKEN }}
90-
HF_BLACK_FOREST_LABS_KEY: dummy
91-
HF_COHERE_KEY: dummy
92-
HF_FAL_KEY: dummy
93-
HF_FIREWORKS_KEY: dummy
94-
HF_HYPERBOLIC_KEY: dummy
95-
HF_NEBIUS_KEY: dummy
96-
HF_NOVITA_KEY: dummy
97-
HF_REPLICATE_KEY: dummy
98-
HF_SAMBANOVA_KEY: dummy
99-
HF_TOGETHER_KEY: dummy
10080

10181
e2e:
10282
runs-on: ubuntu-latest
@@ -160,13 +140,3 @@ jobs:
160140
env:
161141
NPM_CONFIG_REGISTRY: http://localhost:4874/
162142
HF_TOKEN: ${{ secrets.HF_TOKEN }}
163-
HF_BLACK_FOREST_LABS_KEY: dummy
164-
HF_COHERE_KEY: dummy
165-
HF_FAL_KEY: dummy
166-
HF_FIREWORKS_KEY: dummy
167-
HF_HYPERBOLIC_KEY: dummy
168-
HF_NEBIUS_KEY: dummy
169-
HF_NOVITA_KEY: dummy
170-
HF_REPLICATE_KEY: dummy
171-
HF_SAMBANOVA_KEY: dummy
172-
HF_TOGETHER_KEY: dummy

packages/inference/test/HfInference.spec.ts

Lines changed: 20 additions & 20 deletions
Original file line numberDiff line numberDiff line change
@@ -787,7 +787,7 @@ describe.concurrent("HfInference", () => {
787787
describe.concurrent(
788788
"Fal AI",
789789
() => {
790-
const client = new HfInference(env.HF_FAL_KEY);
790+
const client = new HfInference(env.HF_FAL_KEY ?? "dummy");
791791

792792
it(`textToImage - black-forest-labs/FLUX.1-schnell`, async () => {
793793
const res = await client.textToImage({
@@ -818,7 +818,7 @@ describe.concurrent("HfInference", () => {
818818
seed: 176,
819819
},
820820
provider: "fal-ai",
821-
accessToken: env.HF_FAL_KEY,
821+
accessToken: env.HF_FAL_KEY ?? "dummy",
822822
});
823823
expect(res).toBeInstanceOf(Blob);
824824
});
@@ -834,7 +834,7 @@ describe.concurrent("HfInference", () => {
834834
resolution: "480p",
835835
},
836836
provider: "fal-ai",
837-
accessToken: env.HF_FAL_KEY,
837+
accessToken: env.HF_FAL_KEY ?? "dummy",
838838
});
839839
expect(res).toBeInstanceOf(Blob);
840840
});
@@ -848,7 +848,7 @@ describe.concurrent("HfInference", () => {
848848
num_frames: 2,
849849
},
850850
provider: "fal-ai",
851-
accessToken: env.HF_FAL_KEY,
851+
accessToken: env.HF_FAL_KEY ?? "dummy",
852852
});
853853
expect(res).toBeInstanceOf(Blob);
854854
});
@@ -862,7 +862,7 @@ describe.concurrent("HfInference", () => {
862862
num_inference_steps: 2,
863863
},
864864
provider: "fal-ai",
865-
accessToken: env.HF_FAL_KEY,
865+
accessToken: env.HF_FAL_KEY ?? "dummy",
866866
});
867867
expect(res).toBeInstanceOf(Blob);
868868
});
@@ -873,7 +873,7 @@ describe.concurrent("HfInference", () => {
873873
describe.concurrent(
874874
"Replicate",
875875
() => {
876-
const client = new HfInference(env.HF_REPLICATE_KEY);
876+
const client = new HfInference(env.HF_REPLICATE_KEY ?? "dummy");
877877

878878
it("textToImage canonical - black-forest-labs/FLUX.1-schnell", async () => {
879879
const res = await client.textToImage({
@@ -970,7 +970,7 @@ describe.concurrent("HfInference", () => {
970970

971971
it("textToVideo Mochi", async () => {
972972
const res = await textToVideo({
973-
accessToken: env.HF_REPLICATE_KEY,
973+
accessToken: env.HF_REPLICATE_KEY ?? "dummy",
974974
model: "genmo/mochi-1-preview",
975975
provider: "replicate",
976976
inputs: "A running dog",
@@ -989,7 +989,7 @@ describe.concurrent("HfInference", () => {
989989
describe.concurrent(
990990
"SambaNova",
991991
() => {
992-
const client = new HfInference(env.HF_SAMBANOVA_KEY);
992+
const client = new HfInference(env.HF_SAMBANOVA_KEY ?? "dummy");
993993

994994
it("chatCompletion", async () => {
995995
const res = await client.chatCompletion({
@@ -1023,7 +1023,7 @@ describe.concurrent("HfInference", () => {
10231023
describe.concurrent(
10241024
"Together",
10251025
() => {
1026-
const client = new HfInference(env.HF_TOGETHER_KEY);
1026+
const client = new HfInference(env.HF_TOGETHER_KEY ?? "dummy");
10271027

10281028
it("chatCompletion", async () => {
10291029
const res = await client.chatCompletion({
@@ -1078,7 +1078,7 @@ describe.concurrent("HfInference", () => {
10781078
describe.concurrent(
10791079
"Nebius",
10801080
() => {
1081-
const client = new HfInference(env.HF_NEBIUS_KEY);
1081+
const client = new HfInference(env.HF_NEBIUS_KEY ?? "dummy");
10821082

10831083
HARDCODED_MODEL_ID_MAPPING.nebius = {
10841084
"meta-llama/Llama-3.1-8B-Instruct": "meta-llama/Meta-Llama-3.1-8B-Instruct",
@@ -1132,7 +1132,7 @@ describe.concurrent("HfInference", () => {
11321132
model: "black-forest-labs/Flux.1-dev",
11331133
provider: "together",
11341134
messages: [{ role: "user", content: "Complete this sentence with words, one plus one is equal " }],
1135-
accessToken: env.HF_TOGETHER_KEY,
1135+
accessToken: env.HF_TOGETHER_KEY ?? "dummy",
11361136
})
11371137
).rejects.toThrowError(
11381138
"Model black-forest-labs/Flux.1-dev is not supported for task conversational and provider together"
@@ -1143,7 +1143,7 @@ describe.concurrent("HfInference", () => {
11431143
describe.concurrent(
11441144
"Fireworks",
11451145
() => {
1146-
const client = new HfInference(env.HF_FIREWORKS_KEY);
1146+
const client = new HfInference(env.HF_FIREWORKS_KEY ?? "dummy");
11471147

11481148
HARDCODED_MODEL_ID_MAPPING["fireworks-ai"] = {
11491149
"deepseek-ai/DeepSeek-R1": "accounts/fireworks/models/deepseek-r1",
@@ -1199,7 +1199,7 @@ describe.concurrent("HfInference", () => {
11991199

12001200
it("chatCompletion - hyperbolic", async () => {
12011201
const res = await chatCompletion({
1202-
accessToken: env.HF_HYPERBOLIC_KEY,
1202+
accessToken: env.HF_HYPERBOLIC_KEY ?? "dummy",
12031203
model: "meta-llama/Llama-3.2-3B-Instruct",
12041204
provider: "hyperbolic",
12051205
messages: [{ role: "user", content: "Complete this sentence with words, one plus one is equal " }],
@@ -1220,7 +1220,7 @@ describe.concurrent("HfInference", () => {
12201220

12211221
it("chatCompletion stream", async () => {
12221222
const stream = chatCompletionStream({
1223-
accessToken: env.HF_HYPERBOLIC_KEY,
1223+
accessToken: env.HF_HYPERBOLIC_KEY ?? "dummy",
12241224
model: "meta-llama/Llama-3.3-70B-Instruct",
12251225
provider: "hyperbolic",
12261226
messages: [{ role: "user", content: "Complete the equation 1 + 1 = , just the answer" }],
@@ -1236,7 +1236,7 @@ describe.concurrent("HfInference", () => {
12361236

12371237
it("textToImage", async () => {
12381238
const res = await textToImage({
1239-
accessToken: env.HF_HYPERBOLIC_KEY,
1239+
accessToken: env.HF_HYPERBOLIC_KEY ?? "dummy",
12401240
model: "stabilityai/stable-diffusion-2",
12411241
provider: "hyperbolic",
12421242
inputs: "award winning high resolution photo of a giant tortoise",
@@ -1250,7 +1250,7 @@ describe.concurrent("HfInference", () => {
12501250

12511251
it("textGeneration", async () => {
12521252
const res = await textGeneration({
1253-
accessToken: env.HF_HYPERBOLIC_KEY,
1253+
accessToken: env.HF_HYPERBOLIC_KEY ?? "dummy",
12541254
model: "meta-llama/Llama-3.1-405B",
12551255
provider: "hyperbolic",
12561256
inputs: "Paris is",
@@ -1269,7 +1269,7 @@ describe.concurrent("HfInference", () => {
12691269
describe.concurrent(
12701270
"Novita",
12711271
() => {
1272-
const client = new HfInference(env.HF_NOVITA_KEY);
1272+
const client = new HfInference(env.HF_NOVITA_KEY ?? "dummy");
12731273

12741274
HARDCODED_MODEL_ID_MAPPING["novita"] = {
12751275
"meta-llama/llama-3.1-8b-instruct": "meta-llama/llama-3.1-8b-instruct",
@@ -1325,7 +1325,7 @@ describe.concurrent("HfInference", () => {
13251325
const res = await textToImage({
13261326
model: "black-forest-labs/FLUX.1-dev",
13271327
provider: "black-forest-labs",
1328-
accessToken: env.HF_BLACK_FOREST_LABS_KEY,
1328+
accessToken: env.HF_BLACK_FOREST_LABS_KEY ?? "dummy",
13291329
inputs: "A raccoon driving a truck",
13301330
parameters: {
13311331
height: 256,
@@ -1342,7 +1342,7 @@ describe.concurrent("HfInference", () => {
13421342
{
13431343
model: "black-forest-labs/FLUX.1-dev",
13441344
provider: "black-forest-labs",
1345-
accessToken: env.HF_BLACK_FOREST_LABS_KEY,
1345+
accessToken: env.HF_BLACK_FOREST_LABS_KEY ?? "dummy",
13461346
inputs: "A raccoon driving a truck",
13471347
parameters: {
13481348
height: 256,
@@ -1362,7 +1362,7 @@ describe.concurrent("HfInference", () => {
13621362
describe.concurrent(
13631363
"Cohere",
13641364
() => {
1365-
const client = new HfInference(env.HF_COHERE_KEY);
1365+
const client = new HfInference(env.HF_COHERE_KEY ?? "dummy");
13661366

13671367
HARDCODED_MODEL_ID_MAPPING["cohere"] = {
13681368
"CohereForAI/c4ai-command-r7b-12-2024": "command-r7b-12-2024",

0 commit comments

Comments (0)