
Commit 68f7145: some ever-more random doc improvements...

1 parent a37c595

File tree: 1 file changed (+12 −12 lines)


README.md

Lines changed: 12 additions & 12 deletions
```diff
@@ -27,7 +27,7 @@ await uploadFile({
   }
 });
 
-// Use HF Inference API, or external Inference Providers!
+// Use all supported Inference Providers!
 
 await inference.chatCompletion({
   model: "meta-llama/Llama-3.1-8B-Instruct",
```
```diff
@@ -55,7 +55,7 @@ await inference.textToImage({
 
 This is a collection of JS libraries to interact with the Hugging Face API, with TS types included.
 
-- [@huggingface/inference](packages/inference/README.md): Use HF Inference API (serverless), Inference Endpoints (dedicated) and all supported Inference Providers to make calls to 100,000+ Machine Learning models
+- [@huggingface/inference](packages/inference/README.md): Use all supported (serverless) Inference Providers or switch to Inference Endpoints (dedicated) to make calls to 100,000+ Machine Learning models
 - [@huggingface/hub](packages/hub/README.md): Interact with huggingface.co to create or delete repos and commit / download files
 - [@huggingface/agents](packages/agents/README.md): Interact with HF models through a natural language interface
 - [@huggingface/gguf](packages/gguf/README.md): A GGUF parser that works on remotely hosted files.
```
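The `@huggingface/hub` and `@huggingface/gguf` entries in this hunk are prose-only; a rough sketch of what each looks like in use, following the linked package READMEs (the repo name and file contents here are made up for illustration):

```ts
import { createRepo, uploadFile } from "@huggingface/hub";
import { gguf } from "@huggingface/gguf";

// Hypothetical repo name; "hf_..." is the README's usual token placeholder.
await createRepo({ repo: "my-user/nlp-model", accessToken: "hf_..." });
await uploadFile({
  repo: "my-user/nlp-model",
  accessToken: "hf_...",
  file: { path: "README.md", content: new Blob(["# nlp-model"]) },
});

// Parse metadata and tensor info from a remotely hosted GGUF file
// without downloading the whole file.
const { metadata, tensorInfos } = await gguf(
  "https://huggingface.co/TheBloke/TinyLlama-1.1B-Chat-v1.0-GGUF/resolve/main/tinyllama-1.1b-chat-v1.0.Q2_K.gguf"
);
```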
```diff
@@ -128,18 +128,18 @@ import { InferenceClient } from "@huggingface/inference";
 
 const HF_TOKEN = "hf_...";
 
-const inference = new InferenceClient(HF_TOKEN);
+const client = new InferenceClient(HF_TOKEN);
 
 // Chat completion API
-const out = await inference.chatCompletion({
+const out = await client.chatCompletion({
   model: "meta-llama/Llama-3.1-8B-Instruct",
   messages: [{ role: "user", content: "Hello, nice to meet you!" }],
   max_tokens: 512
 });
 console.log(out.choices[0].message);
 
 // Streaming chat completion API
-for await (const chunk of inference.chatCompletionStream({
+for await (const chunk of client.chatCompletionStream({
   model: "meta-llama/Llama-3.1-8B-Instruct",
   messages: [{ role: "user", content: "Hello, nice to meet you!" }],
   max_tokens: 512
```
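The diff's context window cuts the streaming snippet off at `max_tokens`; a sketch of a typical loop body consuming those chunks, assuming the usual OpenAI-style `choices[0].delta` shape:

```ts
import { InferenceClient } from "@huggingface/inference";

const client = new InferenceClient("hf_..."); // placeholder token

for await (const chunk of client.chatCompletionStream({
  model: "meta-llama/Llama-3.1-8B-Instruct",
  messages: [{ role: "user", content: "Hello, nice to meet you!" }],
  max_tokens: 512,
})) {
  // Each chunk carries an incremental piece of the assistant message.
  process.stdout.write(chunk.choices[0].delta.content ?? "");
}
```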
```diff
@@ -148,14 +148,14 @@ for await (const chunk of inference.chatCompletionStream({
 }
 
 /// Using a third-party provider:
-await inference.chatCompletion({
+await client.chatCompletion({
   model: "meta-llama/Llama-3.1-8B-Instruct",
   messages: [{ role: "user", content: "Hello, nice to meet you!" }],
   max_tokens: 512,
   provider: "sambanova", // or together, fal-ai, replicate, cohere …
 })
 
-await inference.textToImage({
+await client.textToImage({
   model: "black-forest-labs/FLUX.1-dev",
   inputs: "a picture of a green bird",
   provider: "fal-ai",
```
```diff
@@ -164,7 +164,7 @@ await inference.textToImage({
 
 
 // You can also omit "model" to use the recommended model for the task
-await inference.translation({
+await client.translation({
   inputs: "My name is Wolfgang and I live in Amsterdam",
   parameters: {
     src_lang: "en",
```
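The translation snippet is likewise truncated by the diff context; a sketch of the complete call, with the target-language parameter assumed since it falls outside this hunk:

```ts
// Reuses the `client` from the hunks above.
await client.translation({
  inputs: "My name is Wolfgang and I live in Amsterdam",
  parameters: {
    src_lang: "en",
    tgt_lang: "fr", // assumed value; not visible in this hunk
  },
});
```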
```diff
@@ -173,17 +173,17 @@ await inference.translation({
 });
 
 // pass multimodal files or URLs as inputs
-await inference.imageToText({
+await client.imageToText({
   model: 'nlpconnect/vit-gpt2-image-captioning',
   data: await (await fetch('https://picsum.photos/300/300')).blob(),
 })
 
 // Using your own dedicated inference endpoint: https://hf.co/docs/inference-endpoints/
-const gpt2 = inference.endpoint('https://xyz.eu-west-1.aws.endpoints.huggingface.cloud/gpt2');
-const { generated_text } = await gpt2.textGeneration({ inputs: 'The answer to the universe is' });
+const gpt2Client = client.endpoint('https://xyz.eu-west-1.aws.endpoints.huggingface.cloud/gpt2');
+const { generated_text } = await gpt2Client.textGeneration({ inputs: 'The answer to the universe is' });
 
 // Chat Completion
-const llamaEndpoint = inference.endpoint(
+const llamaEndpoint = client.endpoint(
   "https://router.huggingface.co/hf-inference/models/meta-llama/Llama-3.1-8B-Instruct"
 );
 const out = await llamaEndpoint.chatCompletion({
```
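The remaining package from the list in this commit's second hunk, `@huggingface/agents`, also has no code in the diff; a heavily hedged sketch based on that package's README (the prompt string is invented, and the `HfAgent` API may differ across versions):

```ts
import { HfAgent } from "@huggingface/agents";

const agent = new HfAgent("hf_..."); // placeholder token

// Generate code for a natural-language request, then evaluate it.
const code = await agent.generateCode("Draw a picture of a rubber duck");
const messages = await agent.evaluateCode(code);
console.log(messages);
```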
