Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
29 changes: 28 additions & 1 deletion packages/tasks/src/snippets/curl.ts
Original file line number Diff line number Diff line change
Expand Up @@ -10,7 +10,7 @@ export const snippetBasic = (model: ModelDataMinimal, accessToken: string): stri
-H "Authorization: Bearer ${accessToken || `{API_TOKEN}`}"`;

export const snippetTextGeneration = (model: ModelDataMinimal, accessToken: string): string => {
if (model.config?.tokenizer_config?.chat_template) {
if (model.tags.includes("conversational")) {
// Conversational model detected, so we display a code snippet that features the Messages API
return `curl 'https://api-inference.huggingface.co/models/${model.id}/v1/chat/completions' \\
-H "Authorization: Bearer ${accessToken || `{API_TOKEN}`}" \\
Expand All @@ -27,6 +27,32 @@ export const snippetTextGeneration = (model: ModelDataMinimal, accessToken: stri
}
};

export const snippetImageTextToTextGeneration = (model: ModelDataMinimal, accessToken: string): string => {
	// Non-conversational models fall back to the generic curl snippet.
	if (!model.tags.includes("conversational")) {
		return snippetBasic(model, accessToken);
	}
	// Conversational model: show a Messages API (chat-completions) request that
	// sends an image URL alongside a text prompt.
	const authToken = accessToken || `{API_TOKEN}`;
	return `curl 'https://api-inference.huggingface.co/models/${model.id}/v1/chat/completions' \\
-H "Authorization: Bearer ${authToken}" \\
-H 'Content-Type: application/json' \\
-d '{
	"model": "${model.id}",
	"messages": [
		{
			"role": "user",
			"content": [
				{"type": "image_url", "image_url": {"url": "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg"}},
				{"type": "text", "text": "Describe this image in one sentence."}
			]
		}
	],
	"max_tokens": 500,
	"stream": false
}'
`;
};

export const snippetZeroShotClassification = (model: ModelDataMinimal, accessToken: string): string =>
`curl https://api-inference.huggingface.co/models/${model.id} \\
-X POST \\
Expand All @@ -51,6 +77,7 @@ export const curlSnippets: Partial<Record<PipelineType, (model: ModelDataMinimal
summarization: snippetBasic,
"feature-extraction": snippetBasic,
"text-generation": snippetTextGeneration,
"image-text-to-text": snippetImageTextToTextGeneration,
"text2text-generation": snippetBasic,
"fill-mask": snippetBasic,
"sentence-similarity": snippetBasic,
Expand Down
32 changes: 31 additions & 1 deletion packages/tasks/src/snippets/js.ts
Original file line number Diff line number Diff line change
Expand Up @@ -24,7 +24,7 @@ query({"inputs": ${getModelInputSnippet(model)}}).then((response) => {
});`;

export const snippetTextGeneration = (model: ModelDataMinimal, accessToken: string): string => {
if (model.config?.tokenizer_config?.chat_template) {
if (model.tags.includes("conversational")) {
// Conversational model detected, so we display a code snippet that features the Messages API
return `import { HfInference } from "@huggingface/inference";

Expand All @@ -41,6 +41,35 @@ for await (const chunk of inference.chatCompletionStream({
return snippetBasic(model, accessToken);
}
};

/**
 * JS inference snippet for image-text-to-text models.
 * Conversational models get a Messages API example streaming a chat completion
 * with an image URL; others fall back to the basic fetch snippet.
 */
export const snippetImageTextToTextGeneration = (model: ModelDataMinimal, accessToken: string): string => {
	if (model.tags.includes("conversational")) {
		// Conversational model detected, so we display a code snippet that features the Messages API
		return `import { HfInference } from "@huggingface/inference";

const inference = new HfInference("${accessToken || `{API_TOKEN}`}");
const imageUrl = "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg";

for await (const chunk of inference.chatCompletionStream({
	model: "${model.id}",
	messages: [
		{
			"role": "user",
			"content": [
				{"type": "image_url", "image_url": {"url": imageUrl}},
				{"type": "text", "text": "Describe this image in one sentence."},
			],
		}
	],
	max_tokens: 500,
})) {
	// delta.content may be undefined on some chunks; default to "" so write() never throws
	process.stdout.write(chunk.choices[0]?.delta?.content || "");
}`;
	} else {
		return snippetBasic(model, accessToken);
	}
};

export const snippetZeroShotClassification = (model: ModelDataMinimal, accessToken: string): string =>
`async function query(data) {
const response = await fetch(
Expand Down Expand Up @@ -156,6 +185,7 @@ export const jsSnippets: Partial<Record<PipelineType, (model: ModelDataMinimal,
summarization: snippetBasic,
"feature-extraction": snippetBasic,
"text-generation": snippetTextGeneration,
"image-text-to-text": snippetImageTextToTextGeneration,
"text2text-generation": snippetBasic,
"fill-mask": snippetBasic,
"sentence-similarity": snippetBasic,
Expand Down
34 changes: 29 additions & 5 deletions packages/tasks/src/snippets/python.ts
Original file line number Diff line number Diff line change
Expand Up @@ -5,18 +5,39 @@ import type { ModelDataMinimal } from "./types.js";
/**
 * Python (huggingface_hub) snippet for conversational text-generation models,
 * streaming a chat completion via the Messages API.
 * Note: the client is constructed with api_key only; the model is passed
 * per-request to chat_completion.
 */
export const snippetConversational = (model: ModelDataMinimal, accessToken: string): string =>
	`from huggingface_hub import InferenceClient

client = InferenceClient(api_key="${accessToken || "{API_TOKEN}"}")

for message in client.chat_completion(
	model="${model.id}",
	messages=[{"role": "user", "content": "What is the capital of France?"}],
	max_tokens=500,
	stream=True,
):
	print(message.choices[0].delta.content, end="")`;

/**
 * Python (huggingface_hub) snippet for conversational image-text-to-text models:
 * streams a chat completion whose user message combines an image URL and a
 * text prompt. Only a remote image URL is shown; a local-file (base64) variant
 * was discussed in review but deferred until multi-snippet support exists.
 */
export const snippetConversationalWithImage = (model: ModelDataMinimal, accessToken: string): string =>
	`from huggingface_hub import InferenceClient

client = InferenceClient(api_key="${accessToken || "{API_TOKEN}"}")

image_url = "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg"

for message in client.chat_completion(
	model="${model.id}",
	messages=[
		{
			"role": "user",
			"content": [
				{"type": "image_url", "image_url": {"url": image_url}},
				{"type": "text", "text": "Describe this image in one sentence."},
			],
		}
	],
	max_tokens=500,
	stream=True,
):
	print(message.choices[0].delta.content, end="")`;

export const snippetZeroShotClassification = (model: ModelDataMinimal): string =>
`def query(payload):
response = requests.post(API_URL, headers=headers, json=payload)
Expand Down Expand Up @@ -153,9 +174,12 @@ export const pythonSnippets: Partial<Record<PipelineType, (model: ModelDataMinim
};

export function getPythonInferenceSnippet(model: ModelDataMinimal, accessToken: string): string {
if (model.pipeline_tag === "text-generation" && model.config?.tokenizer_config?.chat_template) {
if (model.pipeline_tag === "text-generation" && model.tags.includes("conversational")) {
// Conversational model detected, so we display a code snippet that features the Messages API
return snippetConversational(model, accessToken);
} else if (model.pipeline_tag === "image-text-to-text" && model.tags.includes("conversational")) {
// Example sending an image to the Message API
return snippetConversationalWithImage(model, accessToken);
} else {
const body =
model.pipeline_tag && model.pipeline_tag in pythonSnippets
Expand Down
5 changes: 4 additions & 1 deletion packages/tasks/src/snippets/types.ts
Original file line number Diff line number Diff line change
Expand Up @@ -5,4 +5,7 @@ import type { ModelData } from "../model-data";
*
* Add more fields as needed.
*/
export type ModelDataMinimal = Pick<ModelData, "id" | "pipeline_tag" | "mask_token" | "library_name" | "config">;
// Minimal projection of ModelData consumed by the snippet generators
// ("tags" is required for the conversational check).
export type ModelDataMinimal = Pick<ModelData, "id" | "pipeline_tag" | "mask_token" | "library_name" | "config" | "tags">;
Loading