Skip to content

Commit df0c580

Browse files
committed
names
1 parent 5f1ee42 commit df0c580

20 files changed

+152
-138
lines changed

packages/inference/README.md

Lines changed: 18 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -128,21 +128,21 @@ The inference package provides specific error types to help you handle different
128128

129129
The package defines several error types that extend the base `Error` class:
130130

131-
- `HfInferenceError`: Base error class for all Hugging Face Inference errors
132-
- `HfInferenceInputError`: Thrown when there are issues with input parameters
133-
- `HfInferenceProviderApiError`: Thrown when there are API-level errors from providers
134-
- `HfInferenceHubApiError`: Thrown when there are API-levels errors from the Hugging Face Hub
135-
- `HfInferenceProviderOutputError`: Thrown when there are issues with providers' API responses format
131+
- `InferenceClientError`: Base error class for all Hugging Face Inference errors
132+
- `InferenceClientInputError`: Thrown when there are issues with input parameters
133+
- `InferenceClientProviderApiError`: Thrown when there are API-level errors from providers
134+
- `InferenceClientHubApiError`: Thrown when there are API-level errors from the Hugging Face Hub
135+
- `InferenceClientProviderOutputError`: Thrown when there are issues with providers' API response format
136136

137137
### Example Usage
138138

139139
```typescript
140140
import { InferenceClient } from "@huggingface/inference";
141141
import {
142-
HfInferenceError,
143-
HfInferenceProviderApiError,
144-
HfInferenceProviderOutputError,
145-
HfInferenceHubApiError,
142+
InferenceClientError,
143+
InferenceClientProviderApiError,
144+
InferenceClientProviderOutputError,
145+
InferenceClientHubApiError,
146146
} from "@huggingface/inference";
147147

148148
const hf = new HfInference();
@@ -153,20 +153,20 @@ try {
153153
inputs: "Hello, I'm a language model",
154154
});
155155
} catch (error) {
156-
if (error instanceof HfInferenceProviderApiError) {
156+
if (error instanceof InferenceClientProviderApiError) {
157157
// Handle API errors (e.g., rate limits, authentication issues)
158158
console.error("Provider API Error:", error.message);
159159
console.error("HTTP Request details:", error.request);
160160
console.error("HTTP Response details:", error.response);
161-
if (error instanceof HfInferenceHubApiError) {
161+
if (error instanceof InferenceClientHubApiError) {
162162
// Handle API errors (e.g., rate limits, authentication issues)
163163
console.error("Hub API Error:", error.message);
164164
console.error("HTTP Request details:", error.request);
165165
console.error("HTTP Response details:", error.response);
166-
} else if (error instanceof HfInferenceProviderOutputError) {
166+
} else if (error instanceof InferenceClientProviderOutputError) {
167167
// Handle malformed responses from providers
168168
console.error("Provider Output Error:", error.message);
169-
} else if (error instanceof HfInferenceInputError) {
169+
} else if (error instanceof InferenceClientInputError) {
170170
// Handle invalid input parameters
171171
console.error("Input Error:", error.message);
172172
} else {
@@ -182,7 +182,7 @@ try {
182182
inputs: "Hello, I'm a language model",
183183
});
184184
} catch (error) {
185-
if (error instanceof HfInferenceError) {
185+
if (error instanceof InferenceClientError) {
186186
// Handle errors from @huggingface/inference
187187
console.error("Error from InferenceClient:", error);
188188
} else {
@@ -194,7 +194,7 @@ try {
194194
195195
### Error Details
196196
197-
#### HfInferenceProviderApiError
197+
#### InferenceClientProviderApiError
198198
199199
This error occurs when there are issues with the API request made while performing inference with the selected provider.
200200
@@ -203,7 +203,7 @@ It has several properties:
203203
- `request`: Details about the failed request (URL, method, headers)
204204
- `response`: Response details including status code and body
205205
206-
#### HfInferenceHubApiError
206+
#### InferenceClientHubApiError
207207
208208
This error occurs when there are issues with the API request made to the Hugging Face Hub API.
209209
@@ -213,11 +213,11 @@ It has several properties:
213213
- `response`: Response details including status code and body
214214
215215
216-
#### HfInferenceProviderOutputError
216+
#### InferenceClientProviderOutputError
217217
218218
This error occurs when a provider returns a response in an unexpected format.
219219
220-
#### HfInferenceInputError
220+
#### InferenceClientInputError
221221
222222
This error occurs when input parameters are invalid or missing. The error message describes what's wrong with the input.
223223

packages/inference/src/error.ts

Lines changed: 7 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -3,14 +3,14 @@ import type { JsonObject } from "./vendor/type-fest/basic.js";
33
/**
44
* Base class for all inference-related errors.
55
*/
6-
export abstract class HfInferenceError extends Error {
6+
export abstract class InferenceClientError extends Error {
77
constructor(message: string) {
88
super(message);
9-
this.name = "HfInferenceError";
9+
this.name = "InferenceClientError";
1010
}
1111
}
1212

13-
export class HfInferenceInputError extends HfInferenceError {
13+
export class InferenceClientInputError extends InferenceClientError {
1414
constructor(message: string) {
1515
super(message);
1616
this.name = "InputError";
@@ -30,7 +30,7 @@ interface HttpResponse {
3030
body: JsonObject | string;
3131
}
3232

33-
abstract class HfInferenceHttpRequestError extends HfInferenceError {
33+
abstract class InferenceClientHttpRequestError extends InferenceClientError {
3434
httpRequest: HttpRequest;
3535
httpResponse: HttpResponse;
3636
constructor(message: string, httpRequest: HttpRequest, httpResponse: HttpResponse) {
@@ -54,7 +54,7 @@ abstract class HfInferenceHttpRequestError extends HfInferenceError {
5454
/**
5555
* Thrown when the HTTP request to the provider fails, e.g. due to API issues or server errors.
5656
*/
57-
export class HfInferenceProviderApiError extends HfInferenceHttpRequestError {
57+
export class InferenceClientProviderApiError extends InferenceClientHttpRequestError {
5858
constructor(message: string, httpRequest: HttpRequest, httpResponse: HttpResponse) {
5959
super(message, httpRequest, httpResponse);
6060
this.name = "ProviderApiError";
@@ -64,7 +64,7 @@ export class HfInferenceProviderApiError extends HfInferenceHttpRequestError {
6464
/**
6565
* Thrown when the HTTP request to the hub fails, e.g. due to API issues or server errors.
6666
*/
67-
export class HfInferenceHubApiError extends HfInferenceHttpRequestError {
67+
export class InferenceClientHubApiError extends InferenceClientHttpRequestError {
6868
constructor(message: string, httpRequest: HttpRequest, httpResponse: HttpResponse) {
6969
super(message, httpRequest, httpResponse);
7070
this.name = "HubApiError";
@@ -74,7 +74,7 @@ export class HfInferenceHubApiError extends HfInferenceHttpRequestError {
7474
/**
7575
* Thrown when the inference output returned by the provider is invalid / does not match the expectations
7676
*/
77-
export class HfInferenceProviderOutputError extends HfInferenceError {
77+
export class InferenceClientProviderOutputError extends InferenceClientError {
7878
constructor(message: string) {
7979
super(message);
8080
this.name = "ProviderOutputError";

packages/inference/src/lib/getInferenceProviderMapping.ts

Lines changed: 9 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -4,7 +4,7 @@ import { HARDCODED_MODEL_INFERENCE_MAPPING } from "../providers/consts.js";
44
import { EQUIVALENT_SENTENCE_TRANSFORMERS_TASKS } from "../providers/hf-inference.js";
55
import type { InferenceProvider, InferenceProviderOrPolicy, ModelId } from "../types.js";
66
import { typedInclude } from "../utils/typedInclude.js";
7-
import { HfInferenceHubApiError, HfInferenceInputError } from "../error.js";
7+
import { InferenceClientHubApiError, InferenceClientInputError } from "../error.js";
88

99
export const inferenceProviderMappingCache = new Map<ModelId, InferenceProviderMapping>();
1010

@@ -41,14 +41,14 @@ export async function fetchInferenceProviderMappingForModel(
4141
if (resp.headers.get("Content-Type")?.startsWith("application/json")) {
4242
const error = await resp.json();
4343
if ("error" in error && typeof error.error === "string") {
44-
throw new HfInferenceHubApiError(
44+
throw new InferenceClientHubApiError(
4545
`Failed to fetch inference provider mapping for model ${modelId}: ${error.error}`,
4646
{ url, method: "GET" },
4747
{ requestId: resp.headers.get("x-request-id") ?? "", status: resp.status, body: error }
4848
);
4949
}
5050
} else {
51-
throw new HfInferenceHubApiError(
51+
throw new InferenceClientHubApiError(
5252
`Failed to fetch inference provider mapping for model ${modelId}`,
5353
{ url, method: "GET" },
5454
{ requestId: resp.headers.get("x-request-id") ?? "", status: resp.status, body: await resp.text() }
@@ -59,14 +59,14 @@ export async function fetchInferenceProviderMappingForModel(
5959
try {
6060
payload = await resp.json();
6161
} catch {
62-
throw new HfInferenceHubApiError(
62+
throw new InferenceClientHubApiError(
6363
`Failed to fetch inference provider mapping for model ${modelId}: malformed API response, invalid JSON`,
6464
{ url, method: "GET" },
6565
{ requestId: resp.headers.get("x-request-id") ?? "", status: resp.status, body: await resp.text() }
6666
);
6767
}
6868
if (!payload?.inferenceProviderMapping) {
69-
throw new HfInferenceHubApiError(
69+
throw new InferenceClientHubApiError(
7070
`We have not been able to find inference provider information for model ${modelId}.`,
7171
{ url, method: "GET" },
7272
{ requestId: resp.headers.get("x-request-id") ?? "", status: resp.status, body: await resp.text() }
@@ -103,7 +103,7 @@ export async function getInferenceProviderMapping(
103103
? EQUIVALENT_SENTENCE_TRANSFORMERS_TASKS
104104
: [params.task];
105105
if (!typedInclude(equivalentTasks, providerMapping.task)) {
106-
throw new HfInferenceInputError(
106+
throw new InferenceClientInputError(
107107
`Model ${params.modelId} is not supported for task ${params.task} and provider ${params.provider}. Supported task: ${providerMapping.task}.`
108108
);
109109
}
@@ -124,7 +124,7 @@ export async function resolveProvider(
124124
): Promise<InferenceProvider> {
125125
if (endpointUrl) {
126126
if (provider) {
127-
throw new HfInferenceInputError("Specifying both endpointUrl and provider is not supported.");
127+
throw new InferenceClientInputError("Specifying both endpointUrl and provider is not supported.");
128128
}
129129
/// Defaulting to hf-inference helpers / API
130130
return "hf-inference";
@@ -137,13 +137,13 @@ export async function resolveProvider(
137137
}
138138
if (provider === "auto") {
139139
if (!modelId) {
140-
throw new HfInferenceInputError("Specifying a model is required when provider is 'auto'");
140+
throw new InferenceClientInputError("Specifying a model is required when provider is 'auto'");
141141
}
142142
const inferenceProviderMapping = await fetchInferenceProviderMappingForModel(modelId);
143143
provider = Object.keys(inferenceProviderMapping)[0] as InferenceProvider | undefined;
144144
}
145145
if (!provider) {
146-
throw new HfInferenceInputError(`No Inference Provider available for model ${modelId}.`);
146+
throw new InferenceClientInputError(`No Inference Provider available for model ${modelId}.`);
147147
}
148148
return provider;
149149
}

packages/inference/src/lib/getProviderHelper.ts

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -48,7 +48,7 @@ import * as Replicate from "../providers/replicate.js";
4848
import * as Sambanova from "../providers/sambanova.js";
4949
import * as Together from "../providers/together.js";
5050
import type { InferenceProvider, InferenceProviderOrPolicy, InferenceTask } from "../types.js";
51-
import { HfInferenceInputError } from "../error.js";
51+
import { InferenceClientInputError } from "../error.js";
5252

5353
export const PROVIDERS: Record<InferenceProvider, Partial<Record<InferenceTask, TaskProviderHelper>>> = {
5454
"black-forest-labs": {
@@ -282,18 +282,18 @@ export function getProviderHelper(
282282
return new HFInference.HFInferenceTask();
283283
}
284284
if (!task) {
285-
throw new HfInferenceInputError(
285+
throw new InferenceClientInputError(
286286
"you need to provide a task name when using an external provider, e.g. 'text-to-image'"
287287
);
288288
}
289289
if (!(provider in PROVIDERS)) {
290-
throw new HfInferenceInputError(
290+
throw new InferenceClientInputError(
291291
`Provider '${provider}' not supported. Available providers: ${Object.keys(PROVIDERS)}`
292292
);
293293
}
294294
const providerTasks = PROVIDERS[provider];
295295
if (!providerTasks || !(task in providerTasks)) {
296-
throw new HfInferenceInputError(
296+
throw new InferenceClientInputError(
297297
`Task '${task}' not supported for provider '${provider}'. Available tasks: ${Object.keys(providerTasks ?? {})}`
298298
);
299299
}

packages/inference/src/lib/makeRequestOptions.ts

Lines changed: 12 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -5,7 +5,7 @@ import type { InferenceProviderModelMapping } from "./getInferenceProviderMappin
55
import { getInferenceProviderMapping } from "./getInferenceProviderMapping.js";
66
import type { getProviderHelper } from "./getProviderHelper.js";
77
import { isUrl } from "./isUrl.js";
8-
import { HfInferenceHubApiError, HfInferenceInputError } from "../error.js";
8+
import { InferenceClientHubApiError, InferenceClientInputError } from "../error.js";
99

1010
/**
1111
* Lazy-loaded from huggingface.co/api/tasks when needed
@@ -34,10 +34,10 @@ export async function makeRequestOptions(
3434

3535
// Validate inputs
3636
if (args.endpointUrl && provider !== "hf-inference") {
37-
throw new HfInferenceInputError(`Cannot use endpointUrl with a third-party provider.`);
37+
throw new InferenceClientInputError(`Cannot use endpointUrl with a third-party provider.`);
3838
}
3939
if (maybeModel && isUrl(maybeModel)) {
40-
throw new HfInferenceInputError(`Model URLs are no longer supported. Use endpointUrl instead.`);
40+
throw new InferenceClientInputError(`Model URLs are no longer supported. Use endpointUrl instead.`);
4141
}
4242

4343
if (args.endpointUrl) {
@@ -52,14 +52,14 @@ export async function makeRequestOptions(
5252
}
5353

5454
if (!maybeModel && !task) {
55-
throw new HfInferenceInputError("No model provided, and no task has been specified.");
55+
throw new InferenceClientInputError("No model provided, and no task has been specified.");
5656
}
5757

5858
// eslint-disable-next-line @typescript-eslint/no-non-null-assertion
5959
const hfModel = maybeModel ?? (await loadDefaultModel(task!));
6060

6161
if (providerHelper.clientSideRoutingOnly && !maybeModel) {
62-
throw new HfInferenceInputError(`Provider ${provider} requires a model ID to be passed directly.`);
62+
throw new InferenceClientInputError(`Provider ${provider} requires a model ID to be passed directly.`);
6363
}
6464

6565
const inferenceProviderMapping = providerHelper.clientSideRoutingOnly
@@ -83,7 +83,7 @@ export async function makeRequestOptions(
8383
{ fetch: options?.fetch }
8484
);
8585
if (!inferenceProviderMapping) {
86-
throw new HfInferenceInputError(
86+
throw new InferenceClientInputError(
8787
`We have not been able to find inference provider information for model ${hfModel}.`
8888
);
8989
}
@@ -125,7 +125,7 @@ export function makeRequestOptionsFromResolvedModel(
125125
if (providerHelper.clientSideRoutingOnly) {
126126
// Closed-source providers require an accessToken (cannot be routed).
127127
if (accessToken && accessToken.startsWith("hf_")) {
128-
throw new HfInferenceInputError(`Provider ${provider} is closed-source and does not support HF tokens.`);
128+
throw new InferenceClientInputError(`Provider ${provider} is closed-source and does not support HF tokens.`);
129129
}
130130
}
131131
if (accessToken) {
@@ -199,7 +199,9 @@ async function loadDefaultModel(task: InferenceTask): Promise<string> {
199199
}
200200
const taskInfo = tasks[task];
201201
if ((taskInfo?.models.length ?? 0) <= 0) {
202-
throw new HfInferenceInputError(`No default model defined for task ${task}, please define the model explicitly.`);
202+
throw new InferenceClientInputError(
203+
`No default model defined for task ${task}, please define the model explicitly.`
204+
);
203205
}
204206
return taskInfo.models[0].id;
205207
}
@@ -209,7 +211,7 @@ async function loadTaskInfo(): Promise<Record<string, { models: { id: string }[]
209211
const res = await fetch(url);
210212

211213
if (!res.ok) {
212-
throw new HfInferenceHubApiError(
214+
throw new InferenceClientHubApiError(
213215
"Failed to load tasks definitions from Hugging Face Hub.",
214216
{ url, method: "GET" },
215217
{ requestId: res.headers.get("x-request-id") ?? "", status: res.status, body: await res.text() }
@@ -220,7 +222,7 @@ async function loadTaskInfo(): Promise<Record<string, { models: { id: string }[]
220222

221223
function removeProviderPrefix(model: string, provider: string): string {
222224
if (!model.startsWith(`${provider}/`)) {
223-
throw new HfInferenceInputError(`Models from ${provider} must be prefixed by "${provider}/". Got "${model}".`);
225+
throw new InferenceClientInputError(`Models from ${provider} must be prefixed by "${provider}/". Got "${model}".`);
224226
}
225227
return model.slice(provider.length + 1);
226228
}

packages/inference/src/providers/black-forest-labs.ts

Lines changed: 8 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -14,7 +14,11 @@
1414
*
1515
* Thanks!
1616
*/
17-
import { HfInferenceInputError, HfInferenceProviderApiError, HfInferenceProviderOutputError } from "../error.js";
17+
import {
18+
InferenceClientInputError,
19+
InferenceClientProviderApiError,
20+
InferenceClientProviderOutputError,
21+
} from "../error.js";
1822
import type { BodyParams, HeaderParams, UrlParams } from "../types.js";
1923
import { delay } from "../utils/delay.js";
2024
import { omit } from "../utils/omit.js";
@@ -52,7 +56,7 @@ export class BlackForestLabsTextToImageTask extends TaskProviderHelper implement
5256

5357
makeRoute(params: UrlParams): string {
5458
if (!params) {
55-
throw new HfInferenceInputError("Params are required");
59+
throw new InferenceClientInputError("Params are required");
5660
}
5761
return `/v1/${params.model}`;
5862
}
@@ -70,7 +74,7 @@ export class BlackForestLabsTextToImageTask extends TaskProviderHelper implement
7074
urlObj.searchParams.set("attempt", step.toString(10));
7175
const resp = await fetch(urlObj, { headers: { "Content-Type": "application/json" } });
7276
if (!resp.ok) {
73-
throw new HfInferenceProviderApiError(
77+
throw new InferenceClientProviderApiError(
7478
"Failed to fetch result from black forest labs API",
7579
{ url: urlObj.toString(), method: "GET", headers: { "Content-Type": "application/json" } },
7680
{ requestId: resp.headers.get("x-request-id") ?? "", status: resp.status, body: await resp.text() }
@@ -96,7 +100,7 @@ export class BlackForestLabsTextToImageTask extends TaskProviderHelper implement
96100
return await image.blob();
97101
}
98102
}
99-
throw new HfInferenceProviderOutputError(
103+
throw new InferenceClientProviderOutputError(
100104
`Timed out while waiting for the result from black forest labs API - aborting after 5 attempts`
101105
);
102106
}

0 commit comments

Comments
 (0)