|
1 | | -import base64 |
2 | | -from abc import ABC |
3 | | -from typing import Any, Dict, Optional, Union |
| 1 | +from typing import Any, Optional, Union |
4 | 2 |
|
5 | | -from huggingface_hub.hf_api import InferenceProviderMapping |
6 | 3 | from huggingface_hub.inference._common import RequestParameters, _as_dict |
7 | | -from huggingface_hub.inference._providers._common import ( |
8 | | - TaskProviderHelper, |
9 | | - filter_none, |
10 | | -) |
| 4 | +from huggingface_hub.inference._providers._common import BaseConversationalTask, BaseTextGenerationTask |
| 5 | + |
11 | 6 |
|
# Provider identifier registered with huggingface_hub's inference-provider mapping.
_PROVIDER = "ovhcloud"
# OpenAI-compatible API root for OVHcloud AI Endpoints; task routes are appended to it.
_BASE_URL = "https://oai.endpoints.kepler.ai.cloud.ovh.net"
14 | 9 |
|
15 | | -class OVHcloudAIEndpointsTask(TaskProviderHelper, ABC): |
16 | | - def __init__(self, task: str): |
17 | | - super().__init__(provider=_PROVIDER, base_url=_BASE_URL, task=task) |
18 | | - |
19 | | - def _prepare_route(self, mapped_model: str, api_key: str) -> str: |
20 | | - if self.task == "text-to-image": |
21 | | - return "/v1/images/generations" |
22 | | - elif self.task == "conversational": |
23 | | - return "/v1/chat/completions" |
24 | | - elif self.task == "feature-extraction": |
25 | | - return "/v1/embeddings" |
26 | | - elif self.task == "automatic-speech-recognition": |
27 | | - return "/v1/audio/transcriptions" |
28 | | - raise ValueError(f"Unsupported task '{self.task}' for OVHcloud AI Endpoints.") |
29 | | - |
30 | | - def _prepare_payload_as_dict( |
31 | | - self, messages: Any, parameters: Dict, provider_mapping_info: InferenceProviderMapping |
32 | | - ) -> Optional[Dict]: |
33 | | - return {"messages": messages, "model": provider_mapping_info.provider_id, **filter_none(parameters)} |
34 | | - |
35 | | - |
36 | | -class OVHcloudAIEndpointsConversationalTask(OVHcloudAIEndpointsTask): |
37 | | - def __init__(self): |
38 | | - super().__init__("conversational") |
39 | | - |
40 | | - def _prepare_payload_as_dict( |
41 | | - self, messages: Any, parameters: dict, provider_mapping_info: InferenceProviderMapping |
42 | | - ) -> Optional[dict]: |
43 | | - return super()._prepare_payload_as_dict(messages, parameters, provider_mapping_info) |
44 | | - |
45 | 10 |
|
46 | | -class OVHcloudAIEndpointsTextToImageTask(OVHcloudAIEndpointsTask): |
class OVHcloudAIEndpointsConversationalTask(BaseConversationalTask):
    """Conversational (chat-completion) task helper for OVHcloud AI Endpoints."""

    def __init__(self):
        # Every OVHcloud task shares the same provider id and OpenAI-compatible base URL.
        super().__init__(provider=_PROVIDER, base_url=_BASE_URL)

    def _prepare_route(self, mapped_model: str, api_key: str) -> str:
        # The chat route is fixed for this provider; neither the mapped model
        # nor the API key influences it.
        route = "/v1/chat/completions"
        return route
55 | 17 |
|
56 | | - def get_response(self, response: Union[bytes, dict], request_params: Optional[RequestParameters] = None) -> Any: |
57 | | - response_dict = _as_dict(response) |
58 | | - return base64.b64decode(response_dict["data"][0]["b64_json"]) |
59 | | - |
60 | | -class OVHcloudAIEndpointsFeatureExtractionTask(OVHcloudAIEndpointsTask): |
61 | | - def __init__(self): |
62 | | - super().__init__("feature-extraction") |
63 | 18 |
|
64 | | - def _prepare_payload_as_dict( |
65 | | - self, inputs: Any, parameters: Dict, provider_mapping_info: InferenceProviderMapping |
66 | | - ) -> Optional[Dict]: |
67 | | - return {"input": inputs, "model": provider_mapping_info.provider_id, **filter_none(parameters)} |
68 | | - |
69 | | - def get_response(self, response: Union[bytes, dict], request_params: Optional[RequestParameters] = None) -> Any: |
70 | | - embeddings = _as_dict(response)["data"] |
71 | | - return [embedding["embedding"] for embedding in embeddings] |
72 | | - |
73 | | -class OVHcloudAIEndpointsAutomaticSpeechRecognitionTask(OVHcloudAIEndpointsTask): |
class OVHcloudAIEndpointsTextGenerationTask(BaseTextGenerationTask):
    """Text-generation (completion) task helper for OVHcloud AI Endpoints."""

    def __init__(self):
        super().__init__(provider=_PROVIDER, base_url=_BASE_URL)

    def _prepare_route(self, mapped_model: str, api_key: str) -> str:
        # BUGFIX: use the legacy completions endpoint rather than
        # "/v1/chat/completions". get_response() below reads
        # choices[0]["text"], which only the /v1/completions response schema
        # provides — chat completions nest the generated text under
        # choices[0]["message"]["content"]. The completions route keeps the
        # route and the response parsing consistent.
        return "/v1/completions"

    def get_response(self, response: Union[bytes, dict], request_params: Optional[RequestParameters] = None) -> Any:
        """Convert an OpenAI-style completions response to the HF text-generation shape.

        Args:
            response: raw bytes or an already-decoded JSON dict from the provider.
            request_params: unused here; accepted for interface compatibility.

        Returns:
            A dict with ``generated_text`` (the first choice's text) and a
            ``details`` sub-dict carrying ``finish_reason`` and ``seed``
            (``None`` when the provider omits them).
        """
        output = _as_dict(response)["choices"][0]
        return {
            "generated_text": output["text"],
            "details": {
                "finish_reason": output.get("finish_reason"),
                "seed": output.get("seed"),
            },
        }
0 commit comments