Skip to content

Commit

Permalink
Do not require all dummy api keys in inference tests (#1226)
Browse files Browse the repository at this point in the history
I currently find it annoying, when I want to run tests locally (that
already have cached tapes), to have to set the env variables to `"dummy"`:

```sh
HF_FAL_KEY=dummy pnpm run test -t "Fal AI"
```

This PR is a suggestion to get rid of that by defaulting to `"dummy"` in
the code instead of in the CI workflow file. Happy to hear better
suggestions if you can think of any (in Python I would use an automatic
fixture like `get_api_key_or_dummy`, but that approach is quite specific to pytest).

As a bonus, no need to update `.github/workflows/test.yml` when adding a
new provider.

WDYT?
  • Loading branch information
Wauplin authored Mar 4, 2025
1 parent 2027973 commit 6e65421
Show file tree
Hide file tree
Showing 2 changed files with 20 additions and 50 deletions.
30 changes: 0 additions & 30 deletions .github/workflows/test.yml
Original file line number Diff line number Diff line change
Expand Up @@ -41,16 +41,6 @@ jobs:
run: VCR_MODE=playback pnpm --filter ...[${{ steps.since.outputs.SINCE }}] test
env:
HF_TOKEN: ${{ secrets.HF_TOKEN }}
HF_BLACK_FOREST_LABS_KEY: dummy
HF_COHERE_KEY: dummy
HF_FAL_KEY: dummy
HF_FIREWORKS_KEY: dummy
HF_HYPERBOLIC_KEY: dummy
HF_NEBIUS_KEY: dummy
HF_NOVITA_KEY: dummy
HF_REPLICATE_KEY: dummy
HF_SAMBANOVA_KEY: dummy
HF_TOGETHER_KEY: dummy

browser:
runs-on: ubuntu-latest
Expand Down Expand Up @@ -87,16 +77,6 @@ jobs:
run: VCR_MODE=playback pnpm --filter ...[${{ steps.since.outputs.SINCE }}] test:browser
env:
HF_TOKEN: ${{ secrets.HF_TOKEN }}
HF_BLACK_FOREST_LABS_KEY: dummy
HF_COHERE_KEY: dummy
HF_FAL_KEY: dummy
HF_FIREWORKS_KEY: dummy
HF_HYPERBOLIC_KEY: dummy
HF_NEBIUS_KEY: dummy
HF_NOVITA_KEY: dummy
HF_REPLICATE_KEY: dummy
HF_SAMBANOVA_KEY: dummy
HF_TOGETHER_KEY: dummy

e2e:
runs-on: ubuntu-latest
Expand Down Expand Up @@ -160,13 +140,3 @@ jobs:
env:
NPM_CONFIG_REGISTRY: http://localhost:4874/
HF_TOKEN: ${{ secrets.HF_TOKEN }}
HF_BLACK_FOREST_LABS_KEY: dummy
HF_COHERE_KEY: dummy
HF_FAL_KEY: dummy
HF_FIREWORKS_KEY: dummy
HF_HYPERBOLIC_KEY: dummy
HF_NEBIUS_KEY: dummy
HF_NOVITA_KEY: dummy
HF_REPLICATE_KEY: dummy
HF_SAMBANOVA_KEY: dummy
HF_TOGETHER_KEY: dummy
40 changes: 20 additions & 20 deletions packages/inference/test/HfInference.spec.ts
Original file line number Diff line number Diff line change
Expand Up @@ -787,7 +787,7 @@ describe.concurrent("HfInference", () => {
describe.concurrent(
"Fal AI",
() => {
const client = new HfInference(env.HF_FAL_KEY);
const client = new HfInference(env.HF_FAL_KEY ?? "dummy");

it(`textToImage - black-forest-labs/FLUX.1-schnell`, async () => {
const res = await client.textToImage({
Expand Down Expand Up @@ -818,7 +818,7 @@ describe.concurrent("HfInference", () => {
seed: 176,
},
provider: "fal-ai",
accessToken: env.HF_FAL_KEY,
accessToken: env.HF_FAL_KEY ?? "dummy",
});
expect(res).toBeInstanceOf(Blob);
});
Expand All @@ -834,7 +834,7 @@ describe.concurrent("HfInference", () => {
resolution: "480p",
},
provider: "fal-ai",
accessToken: env.HF_FAL_KEY,
accessToken: env.HF_FAL_KEY ?? "dummy",
});
expect(res).toBeInstanceOf(Blob);
});
Expand All @@ -848,7 +848,7 @@ describe.concurrent("HfInference", () => {
num_frames: 2,
},
provider: "fal-ai",
accessToken: env.HF_FAL_KEY,
accessToken: env.HF_FAL_KEY ?? "dummy",
});
expect(res).toBeInstanceOf(Blob);
});
Expand All @@ -862,7 +862,7 @@ describe.concurrent("HfInference", () => {
num_inference_steps: 2,
},
provider: "fal-ai",
accessToken: env.HF_FAL_KEY,
accessToken: env.HF_FAL_KEY ?? "dummy",
});
expect(res).toBeInstanceOf(Blob);
});
Expand All @@ -873,7 +873,7 @@ describe.concurrent("HfInference", () => {
describe.concurrent(
"Replicate",
() => {
const client = new HfInference(env.HF_REPLICATE_KEY);
const client = new HfInference(env.HF_REPLICATE_KEY ?? "dummy");

it("textToImage canonical - black-forest-labs/FLUX.1-schnell", async () => {
const res = await client.textToImage({
Expand Down Expand Up @@ -970,7 +970,7 @@ describe.concurrent("HfInference", () => {

it("textToVideo Mochi", async () => {
const res = await textToVideo({
accessToken: env.HF_REPLICATE_KEY,
accessToken: env.HF_REPLICATE_KEY ?? "dummy",
model: "genmo/mochi-1-preview",
provider: "replicate",
inputs: "A running dog",
Expand All @@ -989,7 +989,7 @@ describe.concurrent("HfInference", () => {
describe.concurrent(
"SambaNova",
() => {
const client = new HfInference(env.HF_SAMBANOVA_KEY);
const client = new HfInference(env.HF_SAMBANOVA_KEY ?? "dummy");

it("chatCompletion", async () => {
const res = await client.chatCompletion({
Expand Down Expand Up @@ -1023,7 +1023,7 @@ describe.concurrent("HfInference", () => {
describe.concurrent(
"Together",
() => {
const client = new HfInference(env.HF_TOGETHER_KEY);
const client = new HfInference(env.HF_TOGETHER_KEY ?? "dummy");

it("chatCompletion", async () => {
const res = await client.chatCompletion({
Expand Down Expand Up @@ -1078,7 +1078,7 @@ describe.concurrent("HfInference", () => {
describe.concurrent(
"Nebius",
() => {
const client = new HfInference(env.HF_NEBIUS_KEY);
const client = new HfInference(env.HF_NEBIUS_KEY ?? "dummy");

HARDCODED_MODEL_ID_MAPPING.nebius = {
"meta-llama/Llama-3.1-8B-Instruct": "meta-llama/Meta-Llama-3.1-8B-Instruct",
Expand Down Expand Up @@ -1132,7 +1132,7 @@ describe.concurrent("HfInference", () => {
model: "black-forest-labs/Flux.1-dev",
provider: "together",
messages: [{ role: "user", content: "Complete this sentence with words, one plus one is equal " }],
accessToken: env.HF_TOGETHER_KEY,
accessToken: env.HF_TOGETHER_KEY ?? "dummy",
})
).rejects.toThrowError(
"Model black-forest-labs/Flux.1-dev is not supported for task conversational and provider together"
Expand All @@ -1143,7 +1143,7 @@ describe.concurrent("HfInference", () => {
describe.concurrent(
"Fireworks",
() => {
const client = new HfInference(env.HF_FIREWORKS_KEY);
const client = new HfInference(env.HF_FIREWORKS_KEY ?? "dummy");

HARDCODED_MODEL_ID_MAPPING["fireworks-ai"] = {
"deepseek-ai/DeepSeek-R1": "accounts/fireworks/models/deepseek-r1",
Expand Down Expand Up @@ -1199,7 +1199,7 @@ describe.concurrent("HfInference", () => {

it("chatCompletion - hyperbolic", async () => {
const res = await chatCompletion({
accessToken: env.HF_HYPERBOLIC_KEY,
accessToken: env.HF_HYPERBOLIC_KEY ?? "dummy",
model: "meta-llama/Llama-3.2-3B-Instruct",
provider: "hyperbolic",
messages: [{ role: "user", content: "Complete this sentence with words, one plus one is equal " }],
Expand All @@ -1220,7 +1220,7 @@ describe.concurrent("HfInference", () => {

it("chatCompletion stream", async () => {
const stream = chatCompletionStream({
accessToken: env.HF_HYPERBOLIC_KEY,
accessToken: env.HF_HYPERBOLIC_KEY ?? "dummy",
model: "meta-llama/Llama-3.3-70B-Instruct",
provider: "hyperbolic",
messages: [{ role: "user", content: "Complete the equation 1 + 1 = , just the answer" }],
Expand All @@ -1236,7 +1236,7 @@ describe.concurrent("HfInference", () => {

it("textToImage", async () => {
const res = await textToImage({
accessToken: env.HF_HYPERBOLIC_KEY,
accessToken: env.HF_HYPERBOLIC_KEY ?? "dummy",
model: "stabilityai/stable-diffusion-2",
provider: "hyperbolic",
inputs: "award winning high resolution photo of a giant tortoise",
Expand All @@ -1250,7 +1250,7 @@ describe.concurrent("HfInference", () => {

it("textGeneration", async () => {
const res = await textGeneration({
accessToken: env.HF_HYPERBOLIC_KEY,
accessToken: env.HF_HYPERBOLIC_KEY ?? "dummy",
model: "meta-llama/Llama-3.1-405B",
provider: "hyperbolic",
inputs: "Paris is",
Expand All @@ -1269,7 +1269,7 @@ describe.concurrent("HfInference", () => {
describe.concurrent(
"Novita",
() => {
const client = new HfInference(env.HF_NOVITA_KEY);
const client = new HfInference(env.HF_NOVITA_KEY ?? "dummy");

HARDCODED_MODEL_ID_MAPPING["novita"] = {
"meta-llama/llama-3.1-8b-instruct": "meta-llama/llama-3.1-8b-instruct",
Expand Down Expand Up @@ -1325,7 +1325,7 @@ describe.concurrent("HfInference", () => {
const res = await textToImage({
model: "black-forest-labs/FLUX.1-dev",
provider: "black-forest-labs",
accessToken: env.HF_BLACK_FOREST_LABS_KEY,
accessToken: env.HF_BLACK_FOREST_LABS_KEY ?? "dummy",
inputs: "A raccoon driving a truck",
parameters: {
height: 256,
Expand All @@ -1342,7 +1342,7 @@ describe.concurrent("HfInference", () => {
{
model: "black-forest-labs/FLUX.1-dev",
provider: "black-forest-labs",
accessToken: env.HF_BLACK_FOREST_LABS_KEY,
accessToken: env.HF_BLACK_FOREST_LABS_KEY ?? "dummy",
inputs: "A raccoon driving a truck",
parameters: {
height: 256,
Expand All @@ -1362,7 +1362,7 @@ describe.concurrent("HfInference", () => {
describe.concurrent(
"Cohere",
() => {
const client = new HfInference(env.HF_COHERE_KEY);
const client = new HfInference(env.HF_COHERE_KEY ?? "dummy");

HARDCODED_MODEL_ID_MAPPING["cohere"] = {
"CohereForAI/c4ai-command-r7b-12-2024": "command-r7b-12-2024",
Expand Down

0 comments on commit 6e65421

Please sign in to comment.