 
 from __future__ import annotations
 
+from typing import List
+from typing_extensions import Literal
+
 import httpx
 
+from ..types import model_list_params
 from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven
+from .._utils import maybe_transform, async_maybe_transform
 from .._compat import cached_property
 from .._resource import SyncAPIResource, AsyncAPIResource
 from .._response import (
     to_raw_response_wrapper,
     to_streamed_response_wrapper,
     async_to_raw_response_wrapper,
     async_to_streamed_response_wrapper,
 )
-from ..types.model import Model
 from .._base_client import make_request_options
 from ..types.model_list_response import ModelListResponse
 
@@ -40,22 +44,52 @@ def with_streaming_response(self) -> ModelsResourceWithStreamingResponse:
         """
         return ModelsResourceWithStreamingResponse(self)
 
-    def retrieve(
+    def list(
         self,
-        model: str,
         *,
+        page: int | NotGiven = NOT_GIVEN,
+        per_page: int | NotGiven = NOT_GIVEN,
+        public_only: bool | NotGiven = NOT_GIVEN,
+        usecases: List[
+            Literal[
+                "MODEL_USECASE_UNKNOWN",
+                "MODEL_USECASE_AGENT",
+                "MODEL_USECASE_FINETUNED",
+                "MODEL_USECASE_KNOWLEDGEBASE",
+                "MODEL_USECASE_GUARDRAIL",
+                "MODEL_USECASE_REASONING",
+                "MODEL_USECASE_SERVERLESS",
+            ]
+        ]
+        | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
         extra_headers: Headers | None = None,
         extra_query: Query | None = None,
         extra_body: Body | None = None,
         timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
-    ) -> Model:
+    ) -> ModelListResponse:
         """
-        Retrieves a model instance, providing basic information about the model such as
-        the owner and permissioning.
+        To list all models, send a GET request to `/v2/gen-ai/models`.
 
         Args:
+          page: Page number.
+
+          per_page: Items per page.
+
+          public_only: Only include models that are publicly available.
+
+          usecases: Include only models defined for the listed use cases.
+
+              - MODEL_USECASE_UNKNOWN: The use case of the model is unknown
+              - MODEL_USECASE_AGENT: The model may be used in an agent
+              - MODEL_USECASE_FINETUNED: The model may be used for fine-tuning
+              - MODEL_USECASE_KNOWLEDGEBASE: The model may be used for knowledge bases
+                (embedding models)
+              - MODEL_USECASE_GUARDRAIL: The model may be used for guardrails
+              - MODEL_USECASE_REASONING: The model may be used for reasoning
+              - MODEL_USECASE_SERVERLESS: The model may be used for serverless inference
+
           extra_headers: Send extra headers
 
           extra_query: Add additional query parameters to the request
@@ -64,36 +98,24 @@ def retrieve(
 
           timeout: Override the client-level default timeout for this request, in seconds
         """
-        if not model:
-            raise ValueError(f"Expected a non-empty value for `model` but received {model!r}")
         return self._get(
-            f"/models/{model}"
+            "/v2/gen-ai/models"
             if self._client._base_url_overridden
-            else f"https://inference.do-ai.run/v1/models/{model}",
+            else "https://api.digitalocean.com/v2/gen-ai/models",
             options=make_request_options(
-                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
-            ),
-            cast_to=Model,
-        )
-
-    def list(
-        self,
-        *,
-        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
-        # The extra values given here take precedence over values defined on the client or passed to this method.
-        extra_headers: Headers | None = None,
-        extra_query: Query | None = None,
-        extra_body: Body | None = None,
-        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
-    ) -> ModelListResponse:
-        """
-        Lists the currently available models, and provides basic information about each
-        one such as the owner and availability.
-        """
-        return self._get(
-            "/models" if self._client._base_url_overridden else "https://inference.do-ai.run/v1/models",
-            options=make_request_options(
-                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                query=maybe_transform(
+                    {
+                        "page": page,
+                        "per_page": per_page,
+                        "public_only": public_only,
+                        "usecases": usecases,
+                    },
+                    model_list_params.ModelListParams,
+                ),
             ),
             cast_to=ModelListResponse,
         )
@@ -119,22 +141,52 @@ def with_streaming_response(self) -> AsyncModelsResourceWithStreamingResponse:
         """
         return AsyncModelsResourceWithStreamingResponse(self)
 
-    async def retrieve(
+    async def list(
         self,
-        model: str,
         *,
+        page: int | NotGiven = NOT_GIVEN,
+        per_page: int | NotGiven = NOT_GIVEN,
+        public_only: bool | NotGiven = NOT_GIVEN,
+        usecases: List[
+            Literal[
+                "MODEL_USECASE_UNKNOWN",
+                "MODEL_USECASE_AGENT",
+                "MODEL_USECASE_FINETUNED",
+                "MODEL_USECASE_KNOWLEDGEBASE",
+                "MODEL_USECASE_GUARDRAIL",
+                "MODEL_USECASE_REASONING",
+                "MODEL_USECASE_SERVERLESS",
+            ]
+        ]
+        | NotGiven = NOT_GIVEN,
         # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
         # The extra values given here take precedence over values defined on the client or passed to this method.
         extra_headers: Headers | None = None,
         extra_query: Query | None = None,
         extra_body: Body | None = None,
         timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
-    ) -> Model:
+    ) -> ModelListResponse:
         """
-        Retrieves a model instance, providing basic information about the model such as
-        the owner and permissioning.
+        To list all models, send a GET request to `/v2/gen-ai/models`.
 
         Args:
+          page: Page number.
+
+          per_page: Items per page.
+
+          public_only: Only include models that are publicly available.
+
+          usecases: Include only models defined for the listed use cases.
+
+              - MODEL_USECASE_UNKNOWN: The use case of the model is unknown
+              - MODEL_USECASE_AGENT: The model may be used in an agent
+              - MODEL_USECASE_FINETUNED: The model may be used for fine-tuning
+              - MODEL_USECASE_KNOWLEDGEBASE: The model may be used for knowledge bases
+                (embedding models)
+              - MODEL_USECASE_GUARDRAIL: The model may be used for guardrails
+              - MODEL_USECASE_REASONING: The model may be used for reasoning
+              - MODEL_USECASE_SERVERLESS: The model may be used for serverless inference
+
           extra_headers: Send extra headers
 
           extra_query: Add additional query parameters to the request
@@ -143,36 +195,24 @@ async def retrieve(
 
           timeout: Override the client-level default timeout for this request, in seconds
         """
-        if not model:
-            raise ValueError(f"Expected a non-empty value for `model` but received {model!r}")
         return await self._get(
-            f"/models/{model}"
+            "/v2/gen-ai/models"
             if self._client._base_url_overridden
-            else f"https://inference.do-ai.run/v1/models/{model}",
-            options=make_request_options(
-                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
-            ),
-            cast_to=Model,
-        )
-
-    async def list(
-        self,
-        *,
-        # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
-        # The extra values given here take precedence over values defined on the client or passed to this method.
-        extra_headers: Headers | None = None,
-        extra_query: Query | None = None,
-        extra_body: Body | None = None,
-        timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
-    ) -> ModelListResponse:
-        """
-        Lists the currently available models, and provides basic information about each
-        one such as the owner and availability.
-        """
-        return await self._get(
-            "/models" if self._client._base_url_overridden else "https://inference.do-ai.run/v1/models",
+            else "https://api.digitalocean.com/v2/gen-ai/models",
             options=make_request_options(
-                extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+                extra_headers=extra_headers,
+                extra_query=extra_query,
+                extra_body=extra_body,
+                timeout=timeout,
+                query=await async_maybe_transform(
+                    {
+                        "page": page,
+                        "per_page": per_page,
+                        "public_only": public_only,
+                        "usecases": usecases,
+                    },
+                    model_list_params.ModelListParams,
+                ),
             ),
             cast_to=ModelListResponse,
         )
@@ -182,9 +222,6 @@ class ModelsResourceWithRawResponse:
     def __init__(self, models: ModelsResource) -> None:
         self._models = models
 
-        self.retrieve = to_raw_response_wrapper(
-            models.retrieve,
-        )
         self.list = to_raw_response_wrapper(
             models.list,
         )
@@ -194,9 +231,6 @@ class AsyncModelsResourceWithRawResponse:
     def __init__(self, models: AsyncModelsResource) -> None:
         self._models = models
 
-        self.retrieve = async_to_raw_response_wrapper(
-            models.retrieve,
-        )
         self.list = async_to_raw_response_wrapper(
             models.list,
         )
@@ -206,9 +240,6 @@ class ModelsResourceWithStreamingResponse:
     def __init__(self, models: ModelsResource) -> None:
         self._models = models
 
-        self.retrieve = to_streamed_response_wrapper(
-            models.retrieve,
-        )
         self.list = to_streamed_response_wrapper(
             models.list,
         )
@@ -218,9 +249,6 @@ class AsyncModelsResourceWithStreamingResponse:
     def __init__(self, models: AsyncModelsResource) -> None:
         self._models = models
 
-        self.retrieve = async_to_streamed_response_wrapper(
-            models.retrieve,
-        )
         self.list = async_to_streamed_response_wrapper(
             models.list,
         )
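
For reference, a minimal usage sketch of the new list() method introduced by this diff. The package name, client class, and auth keyword below are illustrative assumptions and are not taken from this change; only the models.list(...) parameters, the /v2/gen-ai/models endpoint, the ModelListResponse return type, and the raw-response wrapper come from the code above.

    # Hypothetical client setup; package and class names are assumptions.
    from do_genai_sdk import Client

    client = Client(api_key="...")  # assumed auth keyword

    # First page of publicly available models usable by agents.
    models = client.models.list(
        page=1,
        per_page=25,
        public_only=True,
        usecases=["MODEL_USECASE_AGENT"],
    )  # -> ModelListResponse

    # Generated SDKs of this shape typically also expose raw-response access
    # via the wrapper classes defined above:
    raw = client.models.with_raw_response.list(public_only=True)

Note that the old per-model retrieve() method (GET /models/{model} on inference.do-ai.run) is removed in this change; only the paginated, filterable list() endpoint on api.digitalocean.com remains in this resource.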