@@ -13,6 +13,7 @@ import { makeRequestOptionsFromResolvedModel } from "../lib/makeRequestOptions.j
 import type { InferenceProviderMappingEntry, InferenceProviderOrPolicy, InferenceTask, RequestArgs } from "../types.js";
 import { templates } from "./templates.exported.js";
 import { getLogger } from "../lib/logger.js";
+import { HF_ROUTER_AUTO_ENDPOINT } from "../config.js";
 
 export type InferenceSnippetOptions = {
 	streaming?: boolean;
@@ -37,7 +38,7 @@ const CLIENTS: Record<InferenceSnippetLanguage, Client[]> = {
 
 const CLIENTS_AUTO_POLICY: Partial<Record<InferenceSnippetLanguage, Client[]>> = {
 	js: ["huggingface.js"],
-	python: ["huggingface_hub"],
+	python: ["huggingface_hub", "openai"],
 };
 
 type InputPreparationFn = (model: ModelDataMinimal, opts?: Record<string, unknown>) => object;
@@ -179,7 +180,7 @@ const snippetGenerator = (templateName: string, inputPreparationFn?: InputPrepar
 		{
 			accessToken: accessTokenOrPlaceholder,
 			provider,
-			endpointUrl: opts?.endpointUrl,
+			endpointUrl: opts?.endpointUrl ?? (provider === "auto" ? HF_ROUTER_AUTO_ENDPOINT : undefined),
 			...inputs,
 		} as RequestArgs,
 		inferenceProviderMapping,
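For context, a minimal, self-contained sketch of the fallback the last hunk introduces. The constant's value here is illustrative only; the real one is defined in `../config.js`:

```ts
// Illustrative value; the actual constant lives in ../config.js.
const HF_ROUTER_AUTO_ENDPOINT = "https://router.huggingface.co/v1";

// Mirrors the hunk above: an explicitly passed endpointUrl always wins;
// otherwise the "auto" provider policy falls back to the HF router endpoint,
// and any concrete provider leaves endpointUrl undefined.
function resolveEndpointUrl(provider: string, endpointUrl?: string): string | undefined {
	return endpointUrl ?? (provider === "auto" ? HF_ROUTER_AUTO_ENDPOINT : undefined);
}

resolveEndpointUrl("auto"); // router endpoint
resolveEndpointUrl("auto", "https://my-endpoint.example"); // explicit URL wins
resolveEndpointUrl("fal-ai"); // undefined
```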