Add inference snippets for image-text-to-text #927
Changes from all commits
@@ -5,18 +5,39 @@ import type { ModelDataMinimal } from "./types.js";

 export const snippetConversational = (model: ModelDataMinimal, accessToken: string): string =>
 	`from huggingface_hub import InferenceClient

-client = InferenceClient(
-	"${model.id}",
-	token="${accessToken || "{API_TOKEN}"}",
-)
+client = InferenceClient(api_key="${accessToken || "{API_TOKEN}"}")

 for message in client.chat_completion(
+	model="${model.id}",
 	messages=[{"role": "user", "content": "What is the capital of France?"}],
 	max_tokens=500,
 	stream=True,
 ):
 	print(message.choices[0].delta.content, end="")`;

+export const snippetConversationalWithImage = (model: ModelDataMinimal, accessToken: string): string =>
+	`from huggingface_hub import InferenceClient
+
+client = InferenceClient(api_key="${accessToken || "{API_TOKEN}"}")
+
+image_url = "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg"
Collaborator
maybe good to show an example with:

from PIL import Image
import requests
from io import BytesIO
import base64

image_url = "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg"
response = requests.get(image_url)
image = Image.open(BytesIO(response.content))

# Convert the image to a byte array in PNG format
buffered = BytesIO()
image.save(buffered, format="PNG")

# Encode this byte array to base64
img_base64 = base64.b64encode(buffered.getvalue())

# Print the base64 string
print(img_base64.decode())

maybe the snippet would become too long. I will let you decide.
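(Editor's note: a minimal sketch of how that base64 output could be wired into the chat payload, assuming the endpoint accepts data: URLs in the image_url field, which this thread does not confirm. The file path and {MODEL_ID} / {API_TOKEN} placeholders are hypothetical.)

import base64
from io import BytesIO

from PIL import Image
from huggingface_hub import InferenceClient

# Load a local image file instead of fetching one by URL (hypothetical path)
image = Image.open("my_image.png")

# Re-encode the image as PNG bytes, then as a base64 data URL
buffered = BytesIO()
image.save(buffered, format="PNG")
img_base64 = base64.b64encode(buffered.getvalue()).decode()
data_url = f"data:image/png;base64,{img_base64}"

client = InferenceClient(api_key="{API_TOKEN}")

# Assumption: the Messages API accepts base64 data URLs in "image_url",
# mirroring the OpenAI-style payload used in the PR's snippet
for message in client.chat_completion(
    model="{MODEL_ID}",  # placeholder model id
    messages=[
        {
            "role": "user",
            "content": [
                {"type": "image_url", "image_url": {"url": data_url}},
                {"type": "text", "text": "Describe this image in one sentence."},
            ],
        }
    ],
    max_tokens=500,
    stream=True,
):
    print(message.choices[0].delta.content, end="")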
Member
hmm I'd say this would complicate things a bit too much (no strong opinion though). Note that this is for remote inference, not local usage.
Collaborator
yes, I meant more like: remote inference using a local image file (otherwise, to use the snippet, the user needs to upload their image and get its URL)
Contributor (Author)
do you know if it's possible to have several snippets by returning a list, same as for code snippets?
Collaborator
for inference snippets, not possible right now. So suggest that:
+
+for message in client.chat_completion(
+	model="${model.id}",
+	messages=[
+		{
+			"role": "user",
+			"content": [
+				{"type": "image_url", "image_url": {"url": image_url}},
+				{"type": "text", "text": "Describe this image in one sentence."},
+			],
+		}
+	],
+	max_tokens=500,
+	stream=True,
+):
+	print(message.choices[0].delta.content, end="")`;
+
 export const snippetZeroShotClassification = (model: ModelDataMinimal): string =>
 	`def query(payload):
 	response = requests.post(API_URL, headers=headers, json=payload)
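(Editor's note: for reference, this is roughly the Python code the new snippetConversationalWithImage template renders once model.id and the access token are interpolated; {MODEL_ID} and {API_TOKEN} below are placeholders, not real values.)

from huggingface_hub import InferenceClient

client = InferenceClient(api_key="{API_TOKEN}")

image_url = "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg"

# Stream a chat completion that pairs an image URL with a text prompt
for message in client.chat_completion(
    model="{MODEL_ID}",  # interpolated from model.id by the template
    messages=[
        {
            "role": "user",
            "content": [
                {"type": "image_url", "image_url": {"url": image_url}},
                {"type": "text", "text": "Describe this image in one sentence."},
            ],
        }
    ],
    max_tokens=500,
    stream=True,
):
    print(message.choices[0].delta.content, end="")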
@@ -153,9 +174,12 @@ export const pythonSnippets: Partial<Record<PipelineType, (model: ModelDataMinimal
 };

 export function getPythonInferenceSnippet(model: ModelDataMinimal, accessToken: string): string {
-	if (model.pipeline_tag === "text-generation" && model.config?.tokenizer_config?.chat_template) {
+	if (model.pipeline_tag === "text-generation" && model.tags.includes("conversational")) {
 		// Conversational model detected, so we display a code snippet that features the Messages API
 		return snippetConversational(model, accessToken);
+	} else if (model.pipeline_tag === "image-text-to-text" && model.tags.includes("conversational")) {
+		// Example sending an image to the Message API
+		return snippetConversationalWithImage(model, accessToken);
 	} else {
 		const body =
 			model.pipeline_tag && model.pipeline_tag in pythonSnippets
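(Editor's note: to see which branch of this dispatch a given model would take, one can inspect the same metadata from Python; a sketch using huggingface_hub's model_info, where the model id is a placeholder.)

from huggingface_hub import model_info

# Placeholder model id; any Hub model id would do
info = model_info("{MODEL_ID}")
tags = info.tags or []

# Mirror the TypeScript dispatch logic above
if info.pipeline_tag == "text-generation" and "conversational" in tags:
    print("-> snippetConversational")
elif info.pipeline_tag == "image-text-to-text" and "conversational" in tags:
    print("-> snippetConversationalWithImage")
else:
    print("-> task-specific snippet from pythonSnippets")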