@@ -16,9 +16,24 @@ export const snippetConversational = (
 	}
 ): InferenceSnippet[] => {
 	const streaming = opts?.streaming ?? true;
-	const messages: ChatCompletionInputMessage[] = opts?.messages ?? [
-		{ role: "user", content: "What is the capital of France?" },
-	];
+	const exampleMessages: ChatCompletionInputMessage[] =
+		model.pipeline_tag === "text-generation"
+			? [{ role: "user", content: "What is the capital of France?" }]
+			: [
+					{
+						role: "user",
+						content: [
+							{
+								type: "image_url",
+								image_url: {
+									url: "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg",
+								},
+							},
+							{ type: "text", text: "Describe this image in one sentence." },
+						],
+					},
+			  ];
+	const messages = opts?.messages ?? exampleMessages;
 	const messagesStr = stringifyMessages(messages, {
 		sep: ",\n\t",
 		start: `[\n\t`,
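Note on the hunk above: `snippetConversational` now picks its default example messages from `model.pipeline_tag`, while `opts.messages` still overrides them. A minimal sketch of a call site, assuming an illustrative model object (the id, token, and cast are placeholders, not part of this diff):

// Illustrative only: field values below are assumptions for the example.
const vlm = {
	id: "example-org/vision-chat-model", // hypothetical model id
	pipeline_tag: "image-text-to-text",
	tags: ["conversational"],
} as unknown as ModelDataMinimal; // other ModelDataMinimal fields omitted for brevity

// Without opts.messages, the image_url example above is embedded in the generated snippet;
// passing opts.messages keeps the previous behaviour of a caller-supplied conversation.
const snippets = snippetConversational(vlm, "{API_TOKEN}", {
	messages: [{ role: "user", content: "What is the capital of France?" }],
});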
@@ -121,30 +136,6 @@ print(completion.choices[0].message)`,
 	}
 };
 
-export const snippetConversationalWithImage = (model: ModelDataMinimal, accessToken: string): InferenceSnippet => ({
-	content: `from huggingface_hub import InferenceClient
-
-client = InferenceClient(api_key="${accessToken || "{API_TOKEN}"}")
-
-image_url = "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg"
-
-for message in client.chat_completion(
-	model="${model.id}",
-	messages=[
-		{
-			"role": "user",
-			"content": [
-				{"type": "image_url", "image_url": {"url": image_url}},
-				{"type": "text", "text": "Describe this image in one sentence."},
-			],
-		}
-	],
-	max_tokens=500,
-	stream=True,
-):
-	print(message.choices[0].delta.content, end="")`,
-});
-
 export const snippetZeroShotClassification = (model: ModelDataMinimal): InferenceSnippet => ({
 	content: `def query(payload):
 	response = requests.post(API_URL, headers=headers, json=payload)
@@ -282,7 +273,7 @@ export const pythonSnippets: Partial<
 	"feature-extraction": snippetBasic,
 	"text-generation": snippetBasic,
 	"text2text-generation": snippetBasic,
-	"image-text-to-text": snippetConversationalWithImage,
+	"image-text-to-text": snippetConversational,
 	"fill-mask": snippetBasic,
 	"sentence-similarity": snippetBasic,
 	"automatic-speech-recognition": snippetFile,
@@ -306,12 +297,9 @@ export function getPythonInferenceSnippet(
 	accessToken: string,
 	opts?: Record<string, unknown>
 ): InferenceSnippet | InferenceSnippet[] {
-	if (model.pipeline_tag === "text-generation" && model.tags.includes("conversational")) {
+	if (model.tags.includes("conversational")) {
 		// Conversational model detected, so we display a code snippet that features the Messages API
 		return snippetConversational(model, accessToken, opts);
-	} else if (model.pipeline_tag === "image-text-to-text" && model.tags.includes("conversational")) {
-		// Example sending an image to the Message API
-		return snippetConversationalWithImage(model, accessToken);
 	} else {
 		let snippets =
 			model.pipeline_tag && model.pipeline_tag in pythonSnippets
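
With the hunk above, dispatch keys only on the "conversational" tag, so text-generation and image-text-to-text models both route through `snippetConversational`. A rough usage sketch under that assumption (model ids, token, and the cast are illustrative placeholders):

// Both calls now take the snippetConversational path; only the example messages
// embedded in the generated Python differ, based on pipeline_tag.
const textModel = { id: "example/chat-llm", pipeline_tag: "text-generation", tags: ["conversational"] };
const visionModel = { id: "example/vlm", pipeline_tag: "image-text-to-text", tags: ["conversational"] };

for (const m of [textModel, visionModel]) {
	const result = getPythonInferenceSnippet(m as unknown as ModelDataMinimal, "{API_TOKEN}");
	console.log(result);
}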