Skip to content

Commit 0257edf

Browse files
committed
move docs up into client
1 parent 11e09d3 commit 0257edf

File tree

2 files changed

+56
-58
lines changed

src/replicate/_client.py

Lines changed: 56 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -201,7 +201,34 @@ def run(
201201
wait: Union[int, bool, NotGiven] = NOT_GIVEN,
202202
**params: Unpack[PredictionCreateParamsWithoutVersion],
203203
) -> Any:
204-
"""Run a model and wait for its output."""
204+
"""
205+
Run a model prediction.
206+
207+
Args:
208+
ref: Reference to the model or version to run. Can be:
209+
- A string containing a version ID (e.g. "5c7d5dc6dd8bf75c1acaa8565735e7986bc5b66206b55cca93cb72c9bf15ccaa")
210+
- A string with owner/name format (e.g. "replicate/hello-world")
211+
- A string with owner/name:version format (e.g. "replicate/hello-world:5c7d5dc6...")
212+
- A Model instance with owner and name attributes
213+
- A Version instance with id attribute
214+
- A ModelVersionIdentifier dictionary with owner, name, and/or version keys
215+
file_encoding_strategy: Strategy for encoding file inputs, options are "base64" or "url"
216+
use_file_output: If True (default), convert output URLs to FileOutput objects
217+
wait: If True (default), wait for the prediction to complete. If False, return immediately.
218+
If an integer, wait up to that many seconds.
219+
**params: Additional parameters to pass to the prediction creation endpoint including
220+
the required "input" dictionary with model-specific parameters
221+
222+
Returns:
223+
The prediction output, which could be a basic type (str, int, etc.), a FileOutput object,
224+
a list of FileOutput objects, or a dictionary of FileOutput objects, depending on what
225+
the model returns.
226+
227+
Raises:
228+
ModelError: If the model run fails
229+
ValueError: If the reference format is invalid
230+
TypeError: If both wait and prefer parameters are provided
231+
"""
205232
from .lib._predictions import run
206233

207234
return run(
@@ -442,7 +469,34 @@ async def run(
442469
wait: Union[int, bool, NotGiven] = NOT_GIVEN,
443470
**params: Unpack[PredictionCreateParamsWithoutVersion],
444471
) -> Any:
445-
"""Run a model and wait for its output."""
472+
"""
473+
Run a model prediction asynchronously.
474+
475+
Args:
476+
ref: Reference to the model or version to run. Can be:
477+
- A string containing a version ID (e.g. "5c7d5dc6dd8bf75c1acaa8565735e7986bc5b66206b55cca93cb72c9bf15ccaa")
478+
- A string with owner/name format (e.g. "replicate/hello-world")
479+
- A string with owner/name:version format (e.g. "replicate/hello-world:5c7d5dc6...")
480+
- A Model instance with owner and name attributes
481+
- A Version instance with id attribute
482+
- A ModelVersionIdentifier dictionary with owner, name, and/or version keys
483+
use_file_output: If True (default), convert output URLs to AsyncFileOutput objects
484+
file_encoding_strategy: Strategy for encoding file inputs, options are "base64" or "url"
485+
wait: If True (default), wait for the prediction to complete. If False, return immediately.
486+
If an integer, wait up to that many seconds.
487+
**params: Additional parameters to pass to the prediction creation endpoint including
488+
the required "input" dictionary with model-specific parameters
489+
490+
Returns:
491+
The prediction output, which could be a basic type (str, int, etc.), an AsyncFileOutput object,
492+
a list of AsyncFileOutput objects, or a dictionary of AsyncFileOutput objects, depending on what
493+
the model returns.
494+
495+
Raises:
496+
ModelError: If the model run fails
497+
ValueError: If the reference format is invalid
498+
TypeError: If both wait and prefer parameters are provided
499+
"""
446500
from .lib._predictions import async_run
447501

448502
return await async_run(

src/replicate/lib/_predictions.py

Lines changed: 0 additions & 56 deletions
Original file line numberDiff line numberDiff line change
@@ -30,34 +30,6 @@ def run(
3030
file_encoding_strategy: Optional["FileEncodingStrategy"] = None,
3131
**params: Unpack[PredictionCreateParamsWithoutVersion],
3232
) -> PredictionOutput | FileOutput | Iterable[FileOutput] | Dict[str, FileOutput]:
33-
"""
34-
Run a model prediction.
35-
36-
Args:
37-
client: The Replicate instance to use for API calls
38-
ref: Reference to the model or version to run. Can be:
39-
- A string containing a version ID (e.g. "5c7d5dc6dd8bf75c1acaa8565735e7986bc5b66206b55cca93cb72c9bf15ccaa")
40-
- A string with owner/name format (e.g. "replicate/hello-world")
41-
- A string with owner/name/version format (e.g. "replicate/hello-world/5c7d5dc6...")
42-
- A Model instance with owner and name attributes
43-
- A Version instance with id attribute
44-
- A ModelVersionIdentifier dictionary with owner, name, and/or version keys
45-
input: Dictionary of input parameters for the model
46-
wait: If True (default), wait for the prediction to complete. If False, return immediately.
47-
If an integer, wait up to that many seconds.
48-
use_file_output: If True (default), convert output URLs to FileOutput objects
49-
**params: Additional parameters to pass to the prediction creation endpoint
50-
51-
Returns:
52-
The prediction output, which could be a basic type (str, int, etc.), a FileOutput object,
53-
a list of FileOutput objects, or a dictionary of FileOutput objects, depending on what
54-
the model returns.
55-
56-
Raises:
57-
ModelError: If the model run fails
58-
ValueError: If the reference format is invalid
59-
TypeError: If both wait and prefer parameters are provided
60-
"""
6133
from ._files import transform_output
6234

6335
if is_given(wait) and "prefer" in params:
@@ -136,34 +108,6 @@ async def async_run(
136108
use_file_output: Optional[bool] = True,
137109
**params: Unpack[PredictionCreateParamsWithoutVersion],
138110
) -> PredictionOutput | FileOutput | Iterable[FileOutput] | Dict[str, FileOutput]:
139-
"""
140-
Run a model prediction asynchronously.
141-
142-
Args:
143-
client: The AsyncReplicate instance to use for API calls
144-
ref: Reference to the model or version to run. Can be:
145-
- A string containing a version ID (e.g. "5c7d5dc6dd8bf75c1acaa8565735e7986bc5b66206b55cca93cb72c9bf15ccaa")
146-
- A string with owner/name format (e.g. "replicate/hello-world")
147-
- A string with owner/name/version format (e.g. "replicate/hello-world/5c7d5dc6...")
148-
- A Model instance with owner and name attributes
149-
- A Version instance with id attribute
150-
- A ModelVersionIdentifier dictionary with owner, name, and/or version keys
151-
input: Dictionary of input parameters for the model
152-
wait: If True (default), wait for the prediction to complete. If False, return immediately.
153-
If an integer, wait up to that many seconds.
154-
use_file_output: If True (default), convert output URLs to AsyncFileOutput objects
155-
**params: Additional parameters to pass to the prediction creation endpoint
156-
157-
Returns:
158-
The prediction output, which could be a basic type (str, int, etc.), an AsyncFileOutput object,
159-
a list of AsyncFileOutput objects, or a dictionary of AsyncFileOutput objects, depending on what
160-
the model returns.
161-
162-
Raises:
163-
ModelError: If the model run fails
164-
ValueError: If the reference format is invalid
165-
TypeError: If both wait and prefer parameters are provided
166-
"""
167111
from ._files import transform_output
168112

169113
if is_given(wait) and "prefer" in params:

0 commit comments

Comments (0)