diff --git a/.release-please-manifest.json b/.release-please-manifest.json
index b06ba91..6b7b74c 100644
--- a/.release-please-manifest.json
+++ b/.release-please-manifest.json
@@ -1,3 +1,3 @@
{
- ".": "0.2.1"
+ ".": "0.3.0"
}
\ No newline at end of file
diff --git a/.stats.yml b/.stats.yml
index d7a638a..a1ffded 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,4 +1,4 @@
configured_endpoints: 35
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/replicate%2Freplicate-client-efbc8cc2d74644b213e161d3e11e0589d1cef181fb318ea02c8eb6b00f245713.yml
-openapi_spec_hash: 13da0c06c900b61cd98ab678e024987a
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/replicate%2Freplicate-client-88cf5fe1f5accb56ae9fbb31c0df00d1552762d4c558d16d8547894ae95e8ccb.yml
+openapi_spec_hash: 43283d20f335a04241cce165452ff50e
config_hash: 84794ed69d841684ff08a8aa889ef103
diff --git a/CHANGELOG.md b/CHANGELOG.md
index bbcfb68..f8e12c3 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,13 @@
# Changelog
+## 0.3.0 (2025-05-08)
+
+Full Changelog: [v0.2.1...v0.3.0](https://github.com/replicate/replicate-python-stainless/compare/v0.2.1...v0.3.0)
+
+### Features
+
+* **api:** api update ([0e4a103](https://github.com/replicate/replicate-python-stainless/commit/0e4a10391bebf0cae929c8d11ccd7415d1785500))
+
## 0.2.1 (2025-05-07)
Full Changelog: [v0.2.0...v0.2.1](https://github.com/replicate/replicate-python-stainless/compare/v0.2.0...v0.2.1)
diff --git a/api.md b/api.md
index 408f49a..e3acbf5 100644
--- a/api.md
+++ b/api.md
@@ -116,7 +116,7 @@ Methods:
- client.predictions.create(\*\*params) -> Prediction
- client.predictions.list(\*\*params) -> SyncCursorURLPageWithCreatedFilters[Prediction]
-- client.predictions.cancel(\*, prediction_id) -> None
+- client.predictions.cancel(\*, prediction_id) -> Prediction
- client.predictions.get(\*, prediction_id) -> Prediction
# Trainings
diff --git a/examples/run_a_model.py b/examples/run_a_model.py
new file mode 100644
index 0000000..cbb1106
--- /dev/null
+++ b/examples/run_a_model.py
@@ -0,0 +1,12 @@
+import rich
+
+import replicate
+
+outputs = replicate.run(
+ "black-forest-labs/flux-schnell",
+ input={"prompt": "astronaut riding a rocket like a horse"},
+)
+rich.print(outputs)
+for index, output in enumerate(outputs):
+ with open(f"output_{index}.webp", "wb") as file:
+ file.write(output.read())
diff --git a/examples/run_async.py b/examples/run_async.py
new file mode 100644
index 0000000..1c93592
--- /dev/null
+++ b/examples/run_async.py
@@ -0,0 +1,20 @@
+import asyncio
+
+from replicate import AsyncReplicate
+
+replicate = AsyncReplicate()
+
+# https://replicate.com/stability-ai/sdxl
+model_version = "stability-ai/sdxl:39ed52f2a78e934b3ba6e2a89f5b1c712de7dfea535525255b1aa35c5565e08b"
+prompts = [f"A chariot pulled by a team of {count} rainbow unicorns" for count in ["two", "four", "six", "eight"]]
+
+
+async def main() -> None:
+ # Create tasks with asyncio.gather directly
+ tasks = [replicate.run(model_version, input={"prompt": prompt}) for prompt in prompts]
+
+ results = await asyncio.gather(*tasks)
+ print(results)
+
+
+asyncio.run(main())
diff --git a/pyproject.toml b/pyproject.toml
index c6fc012..92cef80 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
[project]
name = "replicate-stainless"
-version = "0.2.1"
+version = "0.3.0"
description = "The official Python library for the replicate API"
dynamic = ["readme"]
license = "Apache-2.0"
diff --git a/src/replicate/__init__.py b/src/replicate/__init__.py
index 7a38cc0..c0695ad 100644
--- a/src/replicate/__init__.py
+++ b/src/replicate/__init__.py
@@ -22,6 +22,7 @@
from ._version import __title__, __version__
from ._response import APIResponse as APIResponse, AsyncAPIResponse as AsyncAPIResponse
from ._constants import DEFAULT_TIMEOUT, DEFAULT_MAX_RETRIES, DEFAULT_CONNECTION_LIMITS
+from .lib._files import FileOutput as FileOutput, AsyncFileOutput as AsyncFileOutput
from ._exceptions import (
APIError,
ConflictError,
@@ -38,6 +39,7 @@
UnprocessableEntityError,
APIResponseValidationError,
)
+from .lib._models import Model as Model, Version as Version, ModelVersionIdentifier as ModelVersionIdentifier
from ._base_client import DefaultHttpxClient, DefaultAsyncHttpxClient
from ._utils._logs import setup_logging as _setup_logging
@@ -80,6 +82,11 @@
"DEFAULT_CONNECTION_LIMITS",
"DefaultHttpxClient",
"DefaultAsyncHttpxClient",
+ "FileOutput",
+ "AsyncFileOutput",
+ "Model",
+ "Version",
+ "ModelVersionIdentifier",
]
_setup_logging()
@@ -230,6 +237,7 @@ def _reset_client() -> None: # type: ignore[reportUnusedFunction]
from ._module_client import (
+ run as run,
files as files,
models as models,
account as account,
diff --git a/src/replicate/_client.py b/src/replicate/_client.py
index 0fe93c6..7c6582a 100644
--- a/src/replicate/_client.py
+++ b/src/replicate/_client.py
@@ -3,11 +3,15 @@
from __future__ import annotations
import os
-from typing import TYPE_CHECKING, Any, Union, Mapping
-from typing_extensions import Self, override
+from typing import TYPE_CHECKING, Any, Union, Mapping, Optional
+from typing_extensions import Self, Unpack, override
import httpx
+from replicate.lib._files import FileEncodingStrategy
+from replicate.lib._predictions import Model, Version, ModelVersionIdentifier
+from replicate.types.prediction_create_params import PredictionCreateParamsWithoutVersion
+
from . import _exceptions
from ._qs import Querystring
from ._types import (
@@ -171,6 +175,10 @@ def with_raw_response(self) -> ReplicateWithRawResponse:
def with_streaming_response(self) -> ReplicateWithStreamedResponse:
return ReplicateWithStreamedResponse(self)
+ @cached_property
+ def poll_interval(self) -> float:
+ return float(os.environ.get("REPLICATE_POLL_INTERVAL", "0.5"))
+
@property
@override
def qs(self) -> Querystring:
@@ -191,6 +199,54 @@ def default_headers(self) -> dict[str, str | Omit]:
**self._custom_headers,
}
+ def run(
+ self,
+ ref: Union[Model, Version, ModelVersionIdentifier, str],
+ *,
+ file_encoding_strategy: Optional["FileEncodingStrategy"] = None,
+ use_file_output: bool = True,
+ wait: Union[int, bool, NotGiven] = NOT_GIVEN,
+ **params: Unpack[PredictionCreateParamsWithoutVersion],
+ ) -> Any:
+ """
+ Run a model prediction.
+
+ Args:
+ ref: Reference to the model or version to run. Can be:
+ - A string containing a version ID (e.g. "5c7d5dc6dd8bf75c1acaa8565735e7986bc5b66206b55cca93cb72c9bf15ccaa")
+ - A string with owner/name format (e.g. "replicate/hello-world")
+ - A string with owner/name:version format (e.g. "replicate/hello-world:5c7d5dc6...")
+ - A Model instance with owner and name attributes
+ - A Version instance with id attribute
+ - A ModelVersionIdentifier dictionary with owner, name, and/or version keys
+ file_encoding_strategy: Strategy for encoding file inputs, options are "base64" or "url"
+ use_file_output: If True (default), convert output URLs to FileOutput objects
+ wait: If True (default), wait for the prediction to complete. If False, return immediately.
+ If an integer, wait up to that many seconds.
+ **params: Additional parameters to pass to the prediction creation endpoint including
+ the required "input" dictionary with model-specific parameters
+
+ Returns:
+ The prediction output, which could be a basic type (str, int, etc.), a FileOutput object,
+ a list of FileOutput objects, or a dictionary of FileOutput objects, depending on what
+ the model returns.
+
+ Raises:
+ ModelError: If the model run fails
+ ValueError: If the reference format is invalid
+ TypeError: If both wait and prefer parameters are provided
+ """
+ from .lib._predictions import run
+
+ return run(
+ self,
+ ref,
+ wait=wait,
+ use_file_output=use_file_output,
+ file_encoding_strategy=file_encoding_strategy,
+ **params,
+ )
+
def copy(
self,
*,
@@ -393,6 +449,10 @@ def with_raw_response(self) -> AsyncReplicateWithRawResponse:
def with_streaming_response(self) -> AsyncReplicateWithStreamedResponse:
return AsyncReplicateWithStreamedResponse(self)
+ @cached_property
+ def poll_interval(self) -> float:
+ return float(os.environ.get("REPLICATE_POLL_INTERVAL", "0.5"))
+
@property
@override
def qs(self) -> Querystring:
@@ -413,6 +473,54 @@ def default_headers(self) -> dict[str, str | Omit]:
**self._custom_headers,
}
+ async def run(
+ self,
+ ref: Union[Model, Version, ModelVersionIdentifier, str],
+ *,
+ use_file_output: bool = True,
+ file_encoding_strategy: Optional["FileEncodingStrategy"] = None,
+ wait: Union[int, bool, NotGiven] = NOT_GIVEN,
+ **params: Unpack[PredictionCreateParamsWithoutVersion],
+ ) -> Any:
+ """
+ Run a model prediction asynchronously.
+
+ Args:
+ ref: Reference to the model or version to run. Can be:
+ - A string containing a version ID (e.g. "5c7d5dc6dd8bf75c1acaa8565735e7986bc5b66206b55cca93cb72c9bf15ccaa")
+ - A string with owner/name format (e.g. "replicate/hello-world")
+ - A string with owner/name:version format (e.g. "replicate/hello-world:5c7d5dc6...")
+ - A Model instance with owner and name attributes
+ - A Version instance with id attribute
+ - A ModelVersionIdentifier dictionary with owner, name, and/or version keys
+ use_file_output: If True (default), convert output URLs to AsyncFileOutput objects
+ file_encoding_strategy: Strategy for encoding file inputs, options are "base64" or "url"
+ wait: If True (default), wait for the prediction to complete. If False, return immediately.
+ If an integer, wait up to that many seconds.
+ **params: Additional parameters to pass to the prediction creation endpoint including
+ the required "input" dictionary with model-specific parameters
+
+ Returns:
+ The prediction output, which could be a basic type (str, int, etc.), an AsyncFileOutput object,
+ a list of AsyncFileOutput objects, or a dictionary of AsyncFileOutput objects, depending on what
+ the model returns.
+
+ Raises:
+ ModelError: If the model run fails
+ ValueError: If the reference format is invalid
+ TypeError: If both wait and prefer parameters are provided
+ """
+ from .lib._predictions import async_run
+
+ return await async_run(
+ self,
+ ref,
+ wait=wait,
+ use_file_output=use_file_output,
+ file_encoding_strategy=file_encoding_strategy,
+ **params,
+ )
+
def copy(
self,
*,
diff --git a/src/replicate/_exceptions.py b/src/replicate/_exceptions.py
index 9fbb505..4b56c2b 100644
--- a/src/replicate/_exceptions.py
+++ b/src/replicate/_exceptions.py
@@ -6,6 +6,8 @@
import httpx
+from replicate.types.prediction import Prediction
+
__all__ = [
"BadRequestError",
"AuthenticationError",
@@ -15,6 +17,7 @@
"UnprocessableEntityError",
"RateLimitError",
"InternalServerError",
+ "ModelError",
]
@@ -106,3 +109,13 @@ class RateLimitError(APIStatusError):
class InternalServerError(APIStatusError):
pass
+
+
+class ModelError(ReplicateError):
+ """An error from user's code in a model."""
+
+ prediction: Prediction
+
+ def __init__(self, prediction: Prediction) -> None:
+ self.prediction = prediction
+ super().__init__(prediction.error)
diff --git a/src/replicate/_module_client.py b/src/replicate/_module_client.py
index 1d90cf0..a33c64d 100644
--- a/src/replicate/_module_client.py
+++ b/src/replicate/_module_client.py
@@ -3,7 +3,7 @@
from __future__ import annotations
from typing import TYPE_CHECKING
-from typing_extensions import override
+from typing_extensions import cast, override
if TYPE_CHECKING:
from .resources.files import FilesResource
@@ -74,6 +74,20 @@ def __load__(self) -> PredictionsResource:
return _load_client().predictions
+if TYPE_CHECKING:
+ from ._client import Replicate
+
+ # get the type checker to infer the run symbol to the same type
+ # as the method on the client so we don't have to define it twice
+ __client: Replicate = cast(Replicate, {})
+ run = __client.run
+else:
+
+ def _run(*args, **kwargs):
+ return _load_client().run(*args, **kwargs)
+
+ run = _run
+
files: FilesResource = FilesResourceProxy().__as_proxied__()
models: ModelsResource = ModelsResourceProxy().__as_proxied__()
account: AccountResource = AccountResourceProxy().__as_proxied__()
diff --git a/src/replicate/_version.py b/src/replicate/_version.py
index c82a2cd..2b5e4a0 100644
--- a/src/replicate/_version.py
+++ b/src/replicate/_version.py
@@ -1,4 +1,4 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
__title__ = "replicate"
-__version__ = "0.2.1" # x-release-please-version
+__version__ = "0.3.0" # x-release-please-version
diff --git a/src/replicate/lib/_files.py b/src/replicate/lib/_files.py
new file mode 100644
index 0000000..c14a944
--- /dev/null
+++ b/src/replicate/lib/_files.py
@@ -0,0 +1,237 @@
+from __future__ import annotations
+
+import io
+import base64
+import mimetypes
+from types import GeneratorType
+from typing import TYPE_CHECKING, Any, Literal, Iterator, Optional, AsyncIterator
+from pathlib import Path
+from typing_extensions import override
+
+import httpx
+
+from .._utils import is_mapping, is_sequence
+
+# Use TYPE_CHECKING to avoid circular imports
+if TYPE_CHECKING:
+ from .._client import Replicate, AsyncReplicate
+
+FileEncodingStrategy = Literal["base64", "url"]
+
+
+try:
+ import numpy as np # type: ignore
+
+ HAS_NUMPY = True
+except ImportError:
+ HAS_NUMPY = False # type: ignore
+
+
+# pylint: disable=too-many-return-statements
+def encode_json(
+ obj: Any, # noqa: ANN401
+ client: Replicate,
+ file_encoding_strategy: Optional["FileEncodingStrategy"] = None,
+) -> Any: # noqa: ANN401
+ """
+ Return a JSON-compatible version of the object.
+ """
+
+ if isinstance(obj, dict):
+ return {
+ key: encode_json(value, client, file_encoding_strategy)
+ for key, value in obj.items() # type: ignore
+ } # type: ignore
+ if isinstance(obj, (list, set, frozenset, GeneratorType, tuple)):
+ return [encode_json(value, client, file_encoding_strategy) for value in obj] # type: ignore
+ if isinstance(obj, Path):
+ with obj.open("rb") as file:
+ return encode_json(file, client, file_encoding_strategy)
+ if isinstance(obj, io.IOBase):
+ if file_encoding_strategy == "base64":
+ return base64_encode_file(obj)
+ else:
+ # todo: support files endpoint
+ # return client.files.create(obj).urls["get"]
+ raise NotImplementedError("File upload is not supported yet")
+ if HAS_NUMPY:
+ if isinstance(obj, np.integer): # type: ignore
+ return int(obj)
+ if isinstance(obj, np.floating): # type: ignore
+ return float(obj)
+ if isinstance(obj, np.ndarray): # type: ignore
+ return obj.tolist()
+ return obj
+
+
+async def async_encode_json(
+ obj: Any, # noqa: ANN401
+ client: AsyncReplicate,
+ file_encoding_strategy: Optional["FileEncodingStrategy"] = None,
+) -> Any: # noqa: ANN401
+ """
+ Asynchronously return a JSON-compatible version of the object.
+ """
+
+ if isinstance(obj, dict):
+ return {
+ key: (await async_encode_json(value, client, file_encoding_strategy))
+ for key, value in obj.items() # type: ignore
+ } # type: ignore
+ if isinstance(obj, (list, set, frozenset, GeneratorType, tuple)):
+ return [
+ (await async_encode_json(value, client, file_encoding_strategy))
+ for value in obj # type: ignore
+ ]
+ if isinstance(obj, Path):
+ with obj.open("rb") as file:
+ return await async_encode_json(file, client, file_encoding_strategy)
+ if isinstance(obj, io.IOBase):
+ if file_encoding_strategy == "base64":
+ # TODO: This should ideally use an async based file reader path.
+ return base64_encode_file(obj)
+ else:
+ # todo: support files endpoint
+ # return (await client.files.async_create(obj)).urls["get"]
+ raise NotImplementedError("File upload is not supported yet")
+ if HAS_NUMPY:
+ if isinstance(obj, np.integer): # type: ignore
+ return int(obj)
+ if isinstance(obj, np.floating): # type: ignore
+ return float(obj)
+ if isinstance(obj, np.ndarray): # type: ignore
+ return obj.tolist()
+ return obj
+
+
+def base64_encode_file(file: io.IOBase) -> str:
+ """
+ Base64 encode a file.
+
+ Args:
+ file: A file handle to upload.
+ Returns:
+ str: A base64-encoded data URI.
+ """
+
+ file.seek(0)
+ body = file.read()
+
+ # Ensure the file handle is in bytes
+ body = body.encode("utf-8") if isinstance(body, str) else body
+ encoded_body = base64.b64encode(body).decode("utf-8")
+
+ mime_type = mimetypes.guess_type(getattr(file, "name", ""))[0] or "application/octet-stream"
+ return f"data:{mime_type};base64,{encoded_body}"
+
+
+class FileOutput(httpx.SyncByteStream):
+ """
+ An object that can be used to read the contents of an output file
+ created by running a Replicate model.
+ """
+
+ url: str
+ """
+ The file URL.
+ """
+
+ _client: Replicate
+
+ def __init__(self, url: str, client: Replicate) -> None:
+ self.url = url
+ self._client = client
+
+ def read(self) -> bytes:
+ if self.url.startswith("data:"):
+ _, encoded = self.url.split(",", 1)
+ return base64.b64decode(encoded)
+
+ with self._client._client.stream("GET", self.url) as response:
+ response.raise_for_status()
+ return response.read()
+
+ @override
+ def __iter__(self) -> Iterator[bytes]:
+ if self.url.startswith("data:"):
+ yield self.read()
+ return
+
+ with self._client._client.stream("GET", self.url) as response:
+ response.raise_for_status()
+ yield from response.iter_bytes()
+
+ @override
+ def __str__(self) -> str:
+ return self.url
+
+ @override
+ def __repr__(self) -> str:
+ return f'{self.__class__.__name__}("{self.url}")'
+
+
+class AsyncFileOutput(httpx.AsyncByteStream):
+ """
+ An object that can be used to read the contents of an output file
+ created by running a Replicate model.
+ """
+
+ url: str
+ """
+ The file URL.
+ """
+
+ _client: AsyncReplicate
+
+ def __init__(self, url: str, client: AsyncReplicate) -> None:
+ self.url = url
+ self._client = client
+
+ async def read(self) -> bytes:
+ if self.url.startswith("data:"):
+ _, encoded = self.url.split(",", 1)
+ return base64.b64decode(encoded)
+
+ async with self._client._client.stream("GET", self.url) as response:
+ response.raise_for_status()
+ return await response.aread()
+
+ @override
+ async def __aiter__(self) -> AsyncIterator[bytes]:
+ if self.url.startswith("data:"):
+ yield await self.read()
+ return
+
+ async with self._client._client.stream("GET", self.url) as response:
+ response.raise_for_status()
+ async for chunk in response.aiter_bytes():
+ yield chunk
+
+ @override
+ def __str__(self) -> str:
+ return self.url
+
+ @override
+ def __repr__(self) -> str:
+ return f'{self.__class__.__name__}("{self.url}")'
+
+
+def transform_output(value: object, client: "Replicate | AsyncReplicate") -> Any:
+ """
+ Transform the output of a prediction to a `FileOutput` object if it's a URL.
+ """
+
+ def transform(obj: Any) -> Any:
+ if is_mapping(obj):
+ return {k: transform(v) for k, v in obj.items()}
+ elif is_sequence(obj) and not isinstance(obj, str):
+ return [transform(item) for item in obj]
+ elif isinstance(obj, str) and (obj.startswith("https:") or obj.startswith("data:")):
+ # Check if the client is async by looking for async in the class name
+ # we're doing this to avoid circular imports
+ if "Async" in client.__class__.__name__:
+ return AsyncFileOutput(obj, client) # type: ignore
+ return FileOutput(obj, client) # type: ignore
+ return obj
+
+ return transform(value)
diff --git a/src/replicate/lib/_models.py b/src/replicate/lib/_models.py
new file mode 100644
index 0000000..b98ee10
--- /dev/null
+++ b/src/replicate/lib/_models.py
@@ -0,0 +1,83 @@
+from __future__ import annotations
+
+import re
+from typing import Any, Dict, Tuple, Union, Optional, NamedTuple
+
+
+class Model:
+ """A Replicate model."""
+
+ def __init__(self, owner: str, name: str):
+ self.owner = owner
+ self.name = name
+
+
+import datetime
+
+from pydantic import BaseModel
+
+
+class Version(BaseModel):
+ """
+ A version of a model.
+ """
+
+ id: str
+ """The unique ID of the version."""
+
+ created_at: datetime.datetime
+ """When the version was created."""
+
+ cog_version: str
+ """The version of the Cog used to create the version."""
+
+ openapi_schema: Dict[str, Any]
+ """An OpenAPI description of the model inputs and outputs."""
+
+
+class ModelVersionIdentifier(NamedTuple):
+ """
+ A reference to a model version in the format owner/name or owner/name:version.
+ """
+
+ owner: str
+ name: str
+ version: Optional[str] = None
+
+ @classmethod
+ def parse(cls, ref: str) -> "ModelVersionIdentifier":
+ """
+ Split a reference in the format owner/name:version into its components.
+ """
+
+        match = re.match(r"^(?P<owner>[^/]+)/(?P<name>[^/:]+)(:(?P<version>.+))?$", ref)
+ if not match:
+ raise ValueError(f"Invalid reference to model version: {ref}. Expected format: owner/name:version")
+
+ return cls(match.group("owner"), match.group("name"), match.group("version"))
+
+
+def resolve_reference(
+ ref: Union[Model, Version, ModelVersionIdentifier, str],
+) -> Tuple[Optional[Version], Optional[str], Optional[str], Optional[str]]:
+ """
+ Resolve a reference to a model or version to its components.
+
+ Returns a tuple of (version, owner, name, version_id).
+ """
+ version = None
+ owner = None
+ name = None
+ version_id = None
+
+ if isinstance(ref, Model):
+ owner, name = ref.owner, ref.name
+ elif isinstance(ref, Version):
+ version = ref
+ version_id = ref.id
+ elif isinstance(ref, ModelVersionIdentifier):
+ owner, name, version_id = ref
+ else:
+ owner, name, version_id = ModelVersionIdentifier.parse(ref)
+
+ return version, owner, name, version_id
diff --git a/src/replicate/lib/_predictions.py b/src/replicate/lib/_predictions.py
new file mode 100644
index 0000000..6d47e9a
--- /dev/null
+++ b/src/replicate/lib/_predictions.py
@@ -0,0 +1,220 @@
+from __future__ import annotations
+
+import time
+from typing import TYPE_CHECKING, Any, Dict, List, Union, Iterable, Iterator, Optional
+from collections.abc import AsyncIterator
+from typing_extensions import Unpack
+
+from replicate.lib._files import FileEncodingStrategy
+from replicate.lib._schema import make_schema_backwards_compatible
+from replicate.types.prediction import Prediction
+from replicate.types.prediction_create_params import PredictionCreateParamsWithoutVersion
+
+from ..types import PredictionCreateParams
+from .._types import NOT_GIVEN, NotGiven
+from .._utils import is_given
+from ._models import Model, Version, ModelVersionIdentifier, resolve_reference
+from .._exceptions import ModelError
+
+if TYPE_CHECKING:
+ from ._files import FileOutput
+ from .._client import Replicate, AsyncReplicate
+
+
+def run(
+ client: "Replicate",
+ ref: Union[Model, Version, ModelVersionIdentifier, str],
+ *,
+ wait: Union[int, bool, NotGiven] = NOT_GIVEN,
+ use_file_output: Optional[bool] = True,
+ file_encoding_strategy: Optional["FileEncodingStrategy"] = None,
+ **params: Unpack[PredictionCreateParamsWithoutVersion],
+) -> object | FileOutput | Iterable[FileOutput] | Dict[str, FileOutput]:
+ from ._files import transform_output
+
+ if is_given(wait) and "prefer" in params:
+ raise TypeError("cannot mix and match prefer and wait")
+
+ if not is_given(wait):
+ wait = True
+
+ is_blocking = wait != False
+
+ if wait:
+ if wait is True:
+ params.setdefault("prefer", "wait")
+ else:
+ params.setdefault("prefer", f"wait={wait}")
+
+ # Resolve ref to its components
+ version, owner, name, version_id = resolve_reference(ref)
+
+ prediction = None
+ if version_id is not None:
+ # Create prediction with the specific version ID
+ params_with_version: PredictionCreateParams = {**params, "version": version_id}
+ prediction = client.predictions.create(file_encoding_strategy=file_encoding_strategy, **params_with_version)
+ elif owner and name:
+ # Create prediction via models resource with owner/name
+ prediction = client.models.predictions.create(
+ file_encoding_strategy=file_encoding_strategy, model_owner=owner, model_name=name, **params
+ )
+ else:
+ # If ref is a string but doesn't match expected patterns
+ if isinstance(ref, str):
+ params_with_version = {**params, "version": ref}
+ prediction = client.predictions.create(file_encoding_strategy=file_encoding_strategy, **params_with_version)
+ else:
+ raise ValueError(
+ f"Invalid reference format: {ref}. Expected a model name ('owner/name'), "
+ "a version ID, a Model object, a Version object, or a ModelVersionIdentifier."
+ )
+
+ # Currently the "Prefer: wait" interface will return a prediction with a status
+ # of "processing" rather than a terminal state because it returns before the
+    # prediction has been fully processed. If the request exceeds the wait time, even if
+ # it is actually processing, the prediction will be in a "starting" state.
+ #
+ # We should fix this in the blocking API itself. Predictions that are done should
+ # be in a terminal state and predictions that are processing should be in state
+ # "processing".
+ in_terminal_state = is_blocking and prediction.status != "starting"
+ if not in_terminal_state:
+ # Return a "polling" iterator if the model has an output iterator array type.
+ if version and _has_output_iterator_array_type(version):
+ return (transform_output(chunk, client) for chunk in output_iterator(prediction=prediction, client=client))
+
+ prediction = client.predictions.wait(prediction.id)
+
+ if prediction.status == "failed":
+ raise ModelError(prediction)
+
+ # Return an iterator for the completed prediction when needed.
+ if version and _has_output_iterator_array_type(version) and prediction.output is not None:
+ return (transform_output(chunk, client) for chunk in prediction.output) # type: ignore
+
+ if use_file_output:
+ return transform_output(prediction.output, client) # type: ignore[no-any-return]
+
+ return prediction.output
+
+
+async def async_run(
+ client: "AsyncReplicate",
+ ref: Union[Model, Version, ModelVersionIdentifier, str],
+ *,
+ file_encoding_strategy: Optional["FileEncodingStrategy"] = None,
+ wait: Union[int, bool, NotGiven] = NOT_GIVEN,
+ use_file_output: Optional[bool] = True,
+ **params: Unpack[PredictionCreateParamsWithoutVersion],
+) -> object | FileOutput | Iterable[FileOutput] | Dict[str, FileOutput]:
+ from ._files import transform_output
+
+ if is_given(wait) and "prefer" in params:
+ raise TypeError("cannot mix and match prefer and wait")
+
+ if not is_given(wait):
+ wait = True
+
+ is_blocking = wait != False
+
+ if wait:
+ if wait is True:
+ params.setdefault("prefer", "wait")
+ else:
+ params.setdefault("prefer", f"wait={wait}")
+
+ # Resolve ref to its components
+ version, owner, name, version_id = resolve_reference(ref)
+
+ prediction = None
+ if version_id is not None:
+ # Create prediction with the specific version ID
+ params_with_version: PredictionCreateParams = {**params, "version": version_id}
+ prediction = await client.predictions.create(
+ file_encoding_strategy=file_encoding_strategy, **params_with_version
+ )
+ elif owner and name:
+ # Create prediction via models resource with owner/name
+ prediction = await client.models.predictions.create(
+ model_owner=owner, model_name=name, file_encoding_strategy=file_encoding_strategy, **params
+ )
+ else:
+ # If ref is a string but doesn't match expected patterns
+ if isinstance(ref, str):
+ params_with_version = {**params, "version": ref}
+ prediction = await client.predictions.create(
+ file_encoding_strategy=file_encoding_strategy, **params_with_version
+ )
+ else:
+ raise ValueError(
+ f"Invalid reference format: {ref}. Expected a model name ('owner/name'), "
+ "a version ID, a Model object, a Version object, or a ModelVersionIdentifier."
+ )
+
+ # Currently the "Prefer: wait" interface will return a prediction with a status
+ # of "processing" rather than a terminal state because it returns before the
+    # prediction has been fully processed. If the request exceeds the wait time, even if
+ # it is actually processing, the prediction will be in a "starting" state.
+ #
+ # We should fix this in the blocking API itself. Predictions that are done should
+ # be in a terminal state and predictions that are processing should be in state
+ # "processing".
+ in_terminal_state = is_blocking and prediction.status != "starting"
+ if not in_terminal_state:
+ # Return a "polling" iterator if the model has an output iterator array type.
+ # if version and _has_output_iterator_array_type(version):
+ # return (
+ # transform_output(chunk, client)
+ # async for chunk in prediction.async_output_iterator()
+ # )
+
+ prediction = await client.predictions.wait(prediction.id)
+
+ if prediction.status == "failed":
+ raise ModelError(prediction)
+
+ # Return an iterator for completed output if the model has an output iterator array type.
+ if version and _has_output_iterator_array_type(version) and prediction.output is not None:
+ return (transform_output(chunk, client) async for chunk in _make_async_iterator(prediction.output)) # type: ignore
+ if use_file_output:
+ return transform_output(prediction.output, client) # type: ignore[no-any-return]
+
+ return prediction.output
+
+
+def _has_output_iterator_array_type(version: Version) -> bool:
+ schema = make_schema_backwards_compatible(version.openapi_schema, version.cog_version)
+ output = schema.get("components", {}).get("schemas", {}).get("Output", {})
+ return output.get("type") == "array" and output.get("x-cog-array-type") == "iterator" # type: ignore[no-any-return]
+
+
+async def _make_async_iterator(list: List[Any]) -> AsyncIterator[Any]:
+ for item in list:
+ yield item
+
+
+def output_iterator(prediction: Prediction, client: Replicate) -> Iterator[Any]:
+ """
+ Return an iterator of the prediction output.
+ """
+
+ # output can really be anything, but if we hit this then we know
+ # it should be a list of something!
+ if not isinstance(prediction.output, list):
+ raise TypeError(f"Expected prediction output to be a list, got {type(prediction.output)}")
+ previous_output: list[Any] = prediction.output or [] # type: ignore[union-attr]
+ while prediction.status not in ["succeeded", "failed", "canceled"]:
+ output: list[Any] = prediction.output or [] # type: ignore[union-attr]
+ new_output = output[len(previous_output) :]
+ yield from new_output
+ previous_output = output
+ time.sleep(client.poll_interval)
+ prediction = client.predictions.get(prediction_id=prediction.id)
+
+ if prediction.status == "failed":
+ raise ModelError(prediction=prediction)
+
+ output: list[Any] = prediction.output or [] # type: ignore
+ new_output = output[len(previous_output) :]
+ yield from new_output
diff --git a/src/replicate/lib/_schema.py b/src/replicate/lib/_schema.py
new file mode 100644
index 0000000..9d9ee0d
--- /dev/null
+++ b/src/replicate/lib/_schema.py
@@ -0,0 +1,27 @@
+from typing import Any, Dict, Optional
+
+from packaging import version
+
+# TODO: this code is shared with replicate's backend. Maybe we should put it in the Cog Python package as the source of truth?
+
+
+def version_has_no_array_type(cog_version: str) -> Optional[bool]:
+ """Iterators have x-cog-array-type=iterator in the schema from 0.3.9 onward"""
+ try:
+ return version.parse(cog_version) < version.parse("0.3.9")
+ except version.InvalidVersion:
+ return None
+
+
+def make_schema_backwards_compatible(
+ schema: Dict[str, Any],
+ cog_version: str,
+) -> Dict[str, Any]:
+ """A place to add backwards compatibility logic for our openapi schema"""
+
+ # If the top-level output is an array, assume it is an iterator in old versions which didn't have an array type
+ if version_has_no_array_type(cog_version):
+ output = schema["components"]["schemas"]["Output"]
+ if output.get("type") == "array":
+ output["x-cog-array-type"] = "iterator"
+ return schema
diff --git a/src/replicate/resources/models/predictions.py b/src/replicate/resources/models/predictions.py
index f0120bc..90f4d75 100644
--- a/src/replicate/resources/models/predictions.py
+++ b/src/replicate/resources/models/predictions.py
@@ -2,11 +2,13 @@
from __future__ import annotations
-from typing import List
+from typing import List, Optional
from typing_extensions import Literal
import httpx
+from replicate.lib._files import FileEncodingStrategy, encode_json, async_encode_json
+
from ..._types import NOT_GIVEN, Body, Query, Headers, NotGiven
from ..._utils import maybe_transform, strip_not_given, async_maybe_transform
from ..._compat import cached_property
@@ -54,6 +56,7 @@ def create(
webhook: str | NotGiven = NOT_GIVEN,
webhook_events_filter: List[Literal["start", "output", "logs", "completed"]] | NotGiven = NOT_GIVEN,
prefer: str | NotGiven = NOT_GIVEN,
+ file_encoding_strategy: Optional["FileEncodingStrategy"] = None,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
@@ -171,7 +174,7 @@ def create(
f"/models/{model_owner}/{model_name}/predictions",
body=maybe_transform(
{
- "input": input,
+ "input": encode_json(input, self._client, file_encoding_strategy=file_encoding_strategy),
"stream": stream,
"webhook": webhook,
"webhook_events_filter": webhook_events_filter,
@@ -215,6 +218,7 @@ async def create(
webhook: str | NotGiven = NOT_GIVEN,
webhook_events_filter: List[Literal["start", "output", "logs", "completed"]] | NotGiven = NOT_GIVEN,
prefer: str | NotGiven = NOT_GIVEN,
+ file_encoding_strategy: Optional["FileEncodingStrategy"] = None,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
@@ -332,7 +336,9 @@ async def create(
f"/models/{model_owner}/{model_name}/predictions",
body=await async_maybe_transform(
{
- "input": input,
+ "input": await async_encode_json(
+ input, self._client, file_encoding_strategy=file_encoding_strategy
+ ),
"stream": stream,
"webhook": webhook,
"webhook_events_filter": webhook_events_filter,
diff --git a/src/replicate/resources/predictions.py b/src/replicate/resources/predictions.py
index 1ced764..6d368ce 100644
--- a/src/replicate/resources/predictions.py
+++ b/src/replicate/resources/predictions.py
@@ -2,14 +2,16 @@
from __future__ import annotations
-from typing import List, Union
+from typing import List, Union, Optional
from datetime import datetime
from typing_extensions import Literal
import httpx
+from replicate.lib._files import FileEncodingStrategy, encode_json, async_encode_json
+
from ..types import prediction_list_params, prediction_create_params
-from .._types import NOT_GIVEN, Body, Query, Headers, NoneType, NotGiven
+from .._types import NOT_GIVEN, Body, Query, Headers, NotGiven
from .._utils import maybe_transform, strip_not_given, async_maybe_transform
from .._compat import cached_property
from .._resource import SyncAPIResource, AsyncAPIResource
@@ -25,6 +27,8 @@
__all__ = ["PredictionsResource", "AsyncPredictionsResource"]
+PREDICTION_TERMINAL_STATES = {"succeeded", "failed", "canceled"}
+
class PredictionsResource(SyncAPIResource):
@cached_property
@@ -46,6 +50,14 @@ def with_streaming_response(self) -> PredictionsResourceWithStreamingResponse:
"""
return PredictionsResourceWithStreamingResponse(self)
    def wait(self, prediction_id: str) -> Prediction:
        """Block until the prediction reaches a terminal state and return it.

        Polls the API repeatedly until the prediction's ``status`` is one of
        ``PREDICTION_TERMINAL_STATES`` ("succeeded", "failed", "canceled").

        Args:
            prediction_id: ID of the prediction to wait for.

        Returns:
            The final Prediction object (which may have failed or been canceled;
            callers must inspect ``status``/``error`` themselves).
        """
        prediction = self.get(prediction_id=prediction_id)
        while prediction.status not in PREDICTION_TERMINAL_STATES:
            # NOTE(review): assumes the base SyncAPIResource provides `_sleep`
            # and the client exposes `poll_interval` -- confirm against
            # _resource.py / _client.py, neither is visible in this hunk.
            self._sleep(self._client.poll_interval)
            prediction = self.get(prediction_id=prediction.id)
        return prediction
+
def create(
self,
*,
@@ -55,6 +67,7 @@ def create(
webhook: str | NotGiven = NOT_GIVEN,
webhook_events_filter: List[Literal["start", "output", "logs", "completed"]] | NotGiven = NOT_GIVEN,
prefer: str | NotGiven = NOT_GIVEN,
+ file_encoding_strategy: Optional["FileEncodingStrategy"] = None,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
@@ -178,7 +191,7 @@ def create(
"/predictions",
body=maybe_transform(
{
- "input": input,
+ "input": encode_json(input, self._client, file_encoding_strategy=file_encoding_strategy),
"version": version,
"stream": stream,
"webhook": webhook,
@@ -317,9 +330,32 @@ def cancel(
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> None:
+ ) -> Prediction:
"""
- Cancel a prediction
+ Cancel a prediction that is currently running.
+
+ Example cURL request that creates a prediction and then cancels it:
+
+ ```console
+ # First, create a prediction
+ PREDICTION_ID=$(curl -s -X POST \\
+ -H "Authorization: Bearer $REPLICATE_API_TOKEN" \\
+ -H "Content-Type: application/json" \\
+ -d '{
+ "input": {
+ "prompt": "a video that may take a while to generate"
+ }
+ }' \\
+ https://api.replicate.com/v1/models/minimax/video-01/predictions | jq -r '.id')
+
+ # Echo the prediction ID
+ echo "Created prediction with ID: $PREDICTION_ID"
+
+ # Cancel the prediction
+ curl -s -X POST \\
+ -H "Authorization: Bearer $REPLICATE_API_TOKEN" \\
+ https://api.replicate.com/v1/predictions/$PREDICTION_ID/cancel
+ ```
Args:
extra_headers: Send extra headers
@@ -332,13 +368,12 @@ def cancel(
"""
if not prediction_id:
raise ValueError(f"Expected a non-empty value for `prediction_id` but received {prediction_id!r}")
- extra_headers = {"Accept": "*/*", **(extra_headers or {})}
return self._post(
f"/predictions/{prediction_id}/cancel",
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
- cast_to=NoneType,
+ cast_to=Prediction,
)
def get(
@@ -464,6 +499,14 @@ def with_streaming_response(self) -> AsyncPredictionsResourceWithStreamingRespon
"""
return AsyncPredictionsResourceWithStreamingResponse(self)
    async def wait(self, prediction_id: str) -> Prediction:
        """Block until the prediction reaches a terminal state and return it.

        Async counterpart of ``PredictionsResource.wait``: polls the API until
        ``status`` is one of ``PREDICTION_TERMINAL_STATES``
        ("succeeded", "failed", "canceled").

        Args:
            prediction_id: ID of the prediction to wait for.

        Returns:
            The final Prediction object (may have failed or been canceled;
            callers must inspect ``status``/``error`` themselves).
        """
        prediction = await self.get(prediction_id=prediction_id)
        while prediction.status not in PREDICTION_TERMINAL_STATES:
            # NOTE(review): assumes an awaitable `_sleep` on AsyncAPIResource
            # and a `poll_interval` attribute on the client -- confirm against
            # _resource.py / _client.py, neither is visible in this hunk.
            await self._sleep(self._client.poll_interval)
            prediction = await self.get(prediction_id=prediction.id)
        return prediction
+
async def create(
self,
*,
@@ -473,6 +516,7 @@ async def create(
webhook: str | NotGiven = NOT_GIVEN,
webhook_events_filter: List[Literal["start", "output", "logs", "completed"]] | NotGiven = NOT_GIVEN,
prefer: str | NotGiven = NOT_GIVEN,
+ file_encoding_strategy: Optional["FileEncodingStrategy"] = None,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
@@ -596,7 +640,9 @@ async def create(
"/predictions",
body=await async_maybe_transform(
{
- "input": input,
+ "input": await async_encode_json(
+ input, self._client, file_encoding_strategy=file_encoding_strategy
+ ),
"version": version,
"stream": stream,
"webhook": webhook,
@@ -735,9 +781,32 @@ async def cancel(
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = NOT_GIVEN,
- ) -> None:
+ ) -> Prediction:
"""
- Cancel a prediction
+ Cancel a prediction that is currently running.
+
+ Example cURL request that creates a prediction and then cancels it:
+
+ ```console
+ # First, create a prediction
+ PREDICTION_ID=$(curl -s -X POST \\
+ -H "Authorization: Bearer $REPLICATE_API_TOKEN" \\
+ -H "Content-Type: application/json" \\
+ -d '{
+ "input": {
+ "prompt": "a video that may take a while to generate"
+ }
+ }' \\
+ https://api.replicate.com/v1/models/minimax/video-01/predictions | jq -r '.id')
+
+ # Echo the prediction ID
+ echo "Created prediction with ID: $PREDICTION_ID"
+
+ # Cancel the prediction
+ curl -s -X POST \\
+ -H "Authorization: Bearer $REPLICATE_API_TOKEN" \\
+ https://api.replicate.com/v1/predictions/$PREDICTION_ID/cancel
+ ```
Args:
extra_headers: Send extra headers
@@ -750,13 +819,12 @@ async def cancel(
"""
if not prediction_id:
raise ValueError(f"Expected a non-empty value for `prediction_id` but received {prediction_id!r}")
- extra_headers = {"Accept": "*/*", **(extra_headers or {})}
return await self._post(
f"/predictions/{prediction_id}/cancel",
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
- cast_to=NoneType,
+ cast_to=Prediction,
)
async def get(
diff --git a/src/replicate/types/__init__.py b/src/replicate/types/__init__.py
index aa49593..342b2a7 100644
--- a/src/replicate/types/__init__.py
+++ b/src/replicate/types/__init__.py
@@ -21,7 +21,10 @@
from .deployment_create_params import DeploymentCreateParams as DeploymentCreateParams
from .deployment_list_response import DeploymentListResponse as DeploymentListResponse
from .deployment_update_params import DeploymentUpdateParams as DeploymentUpdateParams
-from .prediction_create_params import PredictionCreateParams as PredictionCreateParams
+from .prediction_create_params import (
+ PredictionCreateParams as PredictionCreateParams,
+ PredictionCreateParamsWithoutVersion as PredictionCreateParamsWithoutVersion,
+)
from .training_cancel_response import TrainingCancelResponse as TrainingCancelResponse
from .training_create_response import TrainingCreateResponse as TrainingCreateResponse
from .deployment_create_response import DeploymentCreateResponse as DeploymentCreateResponse
diff --git a/src/replicate/types/prediction_create_params.py b/src/replicate/types/prediction_create_params.py
index 4e3026f..735e5f0 100644
--- a/src/replicate/types/prediction_create_params.py
+++ b/src/replicate/types/prediction_create_params.py
@@ -7,10 +7,10 @@
from .._utils import PropertyInfo
-__all__ = ["PredictionCreateParams"]
+__all__ = ["PredictionCreateParams", "PredictionCreateParamsWithoutVersion"]
-class PredictionCreateParams(TypedDict, total=False):
+class PredictionCreateParamsWithoutVersion(TypedDict, total=False):
input: Required[object]
"""The model's input as a JSON object.
@@ -36,18 +36,6 @@ class PredictionCreateParams(TypedDict, total=False):
- you don't need to use the file again (Replicate will not store it)
"""
- version: Required[str]
- """The ID of the model version that you want to run.
-
- This can be specified in two formats:
-
- 1. Just the 64-character version ID:
- `9dcd6d78e7c6560c340d916fe32e9f24aabfa331e5cce95fe31f77fb03121426`
- 2. Full model identifier with version ID in the format `{owner}/{model}:{id}`.
- For example,
- `replicate/hello-world:9dcd6d78e7c6560c340d916fe32e9f24aabfa331e5cce95fe31f77fb03121426`
- """
-
stream: bool
"""**This field is deprecated.**
@@ -103,3 +91,8 @@ class PredictionCreateParams(TypedDict, total=False):
"""
prefer: Annotated[str, PropertyInfo(alias="Prefer")]
+
+
class PredictionCreateParams(PredictionCreateParamsWithoutVersion):
    # Adds the required `version` key on top of the version-less param set.
    version: Required[str]
    """The ID of the model version that you want to run.

    This can be specified in two formats:

    1. Just the 64-character version ID:
       `9dcd6d78e7c6560c340d916fe32e9f24aabfa331e5cce95fe31f77fb03121426`
    2. Full model identifier with version ID in the format `{owner}/{model}:{id}`.
       For example,
       `replicate/hello-world:9dcd6d78e7c6560c340d916fe32e9f24aabfa331e5cce95fe31f77fb03121426`
    """
diff --git a/src/replicate_client/lib/.keep b/src/replicate_client/lib/.keep
deleted file mode 100644
index 5e2c99f..0000000
--- a/src/replicate_client/lib/.keep
+++ /dev/null
@@ -1,4 +0,0 @@
-File generated from our OpenAPI spec by Stainless.
-
-This directory can be used to store custom files to expand the SDK.
-It is ignored by Stainless code generation and its content (other than this keep file) won't be touched.
\ No newline at end of file
diff --git a/tests/api_resources/test_predictions.py b/tests/api_resources/test_predictions.py
index 550ed76..da7d81b 100644
--- a/tests/api_resources/test_predictions.py
+++ b/tests/api_resources/test_predictions.py
@@ -112,7 +112,7 @@ def test_method_cancel(self, client: Replicate) -> None:
prediction = client.predictions.cancel(
prediction_id="prediction_id",
)
- assert prediction is None
+ assert_matches_type(Prediction, prediction, path=["response"])
@pytest.mark.skip()
@parametrize
@@ -124,7 +124,7 @@ def test_raw_response_cancel(self, client: Replicate) -> None:
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
prediction = response.parse()
- assert prediction is None
+ assert_matches_type(Prediction, prediction, path=["response"])
@pytest.mark.skip()
@parametrize
@@ -136,7 +136,7 @@ def test_streaming_response_cancel(self, client: Replicate) -> None:
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
prediction = response.parse()
- assert prediction is None
+ assert_matches_type(Prediction, prediction, path=["response"])
assert cast(Any, response.is_closed) is True
@@ -287,7 +287,7 @@ async def test_method_cancel(self, async_client: AsyncReplicate) -> None:
prediction = await async_client.predictions.cancel(
prediction_id="prediction_id",
)
- assert prediction is None
+ assert_matches_type(Prediction, prediction, path=["response"])
@pytest.mark.skip()
@parametrize
@@ -299,7 +299,7 @@ async def test_raw_response_cancel(self, async_client: AsyncReplicate) -> None:
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
prediction = await response.parse()
- assert prediction is None
+ assert_matches_type(Prediction, prediction, path=["response"])
@pytest.mark.skip()
@parametrize
@@ -311,7 +311,7 @@ async def test_streaming_response_cancel(self, async_client: AsyncReplicate) ->
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
prediction = await response.parse()
- assert prediction is None
+ assert_matches_type(Prediction, prediction, path=["response"])
assert cast(Any, response.is_closed) is True
diff --git a/tests/lib/__init__.py b/tests/lib/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/tests/lib/test_run.py b/tests/lib/test_run.py
new file mode 100644
index 0000000..43df10d
--- /dev/null
+++ b/tests/lib/test_run.py
@@ -0,0 +1,633 @@
+from __future__ import annotations
+
+import io
+import os
+import datetime
+from typing import Any, Dict, List, Optional
+
+import httpx
+import pytest
+from respx import MockRouter
+
+from replicate import Replicate, AsyncReplicate
+from replicate.lib._files import FileOutput, AsyncFileOutput
+from replicate._exceptions import ModelError, NotFoundError, BadRequestError
+from replicate.lib._models import Model, Version, ModelVersionIdentifier
+
+base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
+bearer_token = "My Bearer Token"
+
+
+# Mock prediction data for testing
def create_mock_prediction(
    status: str = "succeeded",
    output: Any = "test output",
    error: Optional[str] = None,
    logs: Optional[str] = None,
    urls: Optional[Dict[str, str]] = None,
) -> Dict[str, Any]:
    """Build a prediction payload dict shaped like an API response.

    All fields other than the keyword arguments are fixed fixture values.
    When *urls* is not supplied, default get/cancel URLs for the fixture
    prediction ID are used.
    """
    default_urls = {
        "get": "https://api.replicate.com/v1/predictions/test_prediction_id",
        "cancel": "https://api.replicate.com/v1/predictions/test_prediction_id/cancel",
    }
    # completed_at is only populated once the prediction reached a final state.
    completed_at = "2023-01-01T00:00:02Z" if status in ("succeeded", "failed") else None
    return {
        "id": "test_prediction_id",
        "version": "test_version",
        "status": status,
        "input": {"prompt": "test prompt"},
        "output": output,
        "error": error,
        "logs": logs,
        "created_at": "2023-01-01T00:00:00Z",
        "started_at": "2023-01-01T00:00:01Z",
        "completed_at": completed_at,
        "urls": urls if urls is not None else default_urls,
        "model": "test-model",
        "data_removed": False,
    }
+
+
def _version_with_schema(id: str = "v1", output_schema: Optional[object] = None) -> Version:
    """Build a Version fixture carrying a minimal Cog OpenAPI schema.

    The schema declares a single required string input ``text``; the Output
    schema defaults to a plain string unless *output_schema* is provided
    (any falsy value also falls back to the default, matching ``or``).
    """
    input_schema = {
        "type": "object",
        "title": "Input",
        "required": ["text"],
        "properties": {
            "text": {
                "type": "string",
                "title": "Text",
                "x-order": 0,
                "description": "The text input",
            },
        },
    }
    resolved_output = output_schema or {
        "type": "string",
        "title": "Output",
    }
    return Version(
        id=id,
        created_at=datetime.datetime.fromisoformat("2022-03-16T00:35:56.210272"),
        cog_version="dev",
        openapi_schema={
            "openapi": "3.0.2",
            "info": {"title": "Cog", "version": "0.1.0"},
            "paths": {},
            "components": {
                "schemas": {
                    "Input": input_schema,
                    "Output": resolved_output,
                }
            },
        },
    )
+
+
class TestRun:
    """Tests for ``Replicate.run`` against respx-mocked HTTP endpoints."""

    client = Replicate(base_url=base_url, bearer_token=bearer_token, _strict_response_validation=True)

    # Common model reference format that will work with the new SDK
    model_ref = "owner/name:version"

    @pytest.mark.respx(base_url=base_url)
    def test_run_basic(self, respx_mock: MockRouter) -> None:
        """Test basic model run functionality."""
        respx_mock.post("/predictions").mock(return_value=httpx.Response(201, json=create_mock_prediction()))

        output: Any = self.client.run(self.model_ref, input={"prompt": "test prompt"})

        assert output == "test output"

    @pytest.mark.respx(base_url=base_url)
    def test_run_with_wait_true(self, respx_mock: MockRouter) -> None:
        """Test run with wait=True parameter."""
        respx_mock.post("/predictions").mock(return_value=httpx.Response(201, json=create_mock_prediction()))

        output: Any = self.client.run(self.model_ref, wait=True, input={"prompt": "test prompt"})

        assert output == "test output"

    @pytest.mark.respx(base_url=base_url)
    def test_run_with_wait_int(self, respx_mock: MockRouter) -> None:
        """Test run with wait as an integer value."""
        respx_mock.post("/predictions").mock(return_value=httpx.Response(201, json=create_mock_prediction()))

        output: Any = self.client.run(self.model_ref, wait=10, input={"prompt": "test prompt"})

        assert output == "test output"

    @pytest.mark.respx(base_url=base_url)
    def test_run_without_wait(self, respx_mock: MockRouter) -> None:
        """Test run with wait=False parameter."""
        # Initial prediction state is "processing"
        respx_mock.post("/predictions").mock(
            return_value=httpx.Response(201, json=create_mock_prediction(status="processing"))
        )

        # When we wait for it, it becomes "succeeded"
        respx_mock.get("/predictions/test_prediction_id").mock(
            return_value=httpx.Response(200, json=create_mock_prediction(status="succeeded"))
        )

        output: Any = self.client.run(self.model_ref, wait=False, input={"prompt": "test prompt"})

        assert output == "test output"

    # assert_all_mocked=False: presumably so touching the (never-fetched)
    # delivery URL does not fail the mock router -- TODO confirm.
    @pytest.mark.respx(base_url=base_url, assert_all_mocked=False)
    def test_run_with_file_output(self, respx_mock: MockRouter) -> None:
        """Test run with file output."""
        # Mock prediction with file URL output
        file_url = "https://replicate.delivery/output.png"
        respx_mock.post("/predictions").mock(
            return_value=httpx.Response(201, json=create_mock_prediction(output=file_url))
        )

        output: Any = self.client.run(self.model_ref, input={"prompt": "generate image"})

        assert isinstance(output, FileOutput)
        assert output.url == file_url

    @pytest.mark.respx(base_url=base_url)
    def test_run_with_data_uri_output(self, respx_mock: MockRouter) -> None:
        """Test run with data URI output."""
        # Create a data URI for a small PNG image (1x1 transparent pixel)
        data_uri = "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAQAAAC1HAwCAAAAC0lEQVR42mNkYAAAAAYAAjCB0C8AAAAASUVORK5CYII="

        # Mock prediction with data URI output
        respx_mock.post("/predictions").mock(
            return_value=httpx.Response(201, json=create_mock_prediction(output=data_uri))
        )

        # Use a valid model version ID format
        output: Any = self.client.run("owner/name:version", input={"prompt": "generate small image"})

        assert isinstance(output, FileOutput)
        assert output.url == data_uri

        # Test that we can read the data
        image_data = output.read()
        assert isinstance(image_data, bytes)
        assert len(image_data) > 0

        # Test that we can iterate over the data
        chunks = list(output)
        assert len(chunks) == 1
        assert chunks[0] == image_data

    @pytest.mark.respx(base_url=base_url)
    def test_run_with_file_list_output(self, respx_mock: MockRouter) -> None:
        """Test run with list of file outputs."""
        # Create a mock prediction response with a list of file URLs
        file_urls = ["https://replicate.delivery/output1.png", "https://replicate.delivery/output2.png"]
        mock_prediction = create_mock_prediction()
        mock_prediction["output"] = file_urls

        # Mock the endpoint
        respx_mock.post("/predictions").mock(return_value=httpx.Response(201, json=mock_prediction))

        output: list[FileOutput] = self.client.run(
            self.model_ref, use_file_output=True, input={"prompt": "generate multiple images"}
        )

        assert isinstance(output, list)
        assert len(output) == 2
        assert all(isinstance(item, FileOutput) for item in output)

    @pytest.mark.respx(base_url=base_url)
    def test_run_with_dict_file_output(self, respx_mock: MockRouter) -> None:
        """Test run with dictionary of file outputs."""
        # Mock prediction with dict of file URLs
        file_urls = {
            "image1": "https://replicate.delivery/output1.png",
            "image2": "https://replicate.delivery/output2.png",
        }
        respx_mock.post("/predictions").mock(
            return_value=httpx.Response(201, json=create_mock_prediction(output=file_urls))
        )

        output: Dict[str, FileOutput] = self.client.run(self.model_ref, input={"prompt": "structured output"})

        assert isinstance(output, dict)
        assert len(output) == 2
        assert all(isinstance(item, FileOutput) for item in output.values())

    @pytest.mark.respx(base_url=base_url)
    def test_run_with_error(self, respx_mock: MockRouter) -> None:
        """Test run with model error."""
        # Mock prediction with error
        respx_mock.post("/predictions").mock(
            return_value=httpx.Response(201, json=create_mock_prediction(status="failed", error="Model error occurred"))
        )

        with pytest.raises(ModelError):
            self.client.run(self.model_ref, input={"prompt": "trigger error"})

    @pytest.mark.respx(base_url=base_url)
    def test_run_with_base64_file(self, respx_mock: MockRouter) -> None:
        """Test run with base64 encoded file input."""
        # Create a simple file-like object
        file_obj = io.BytesIO(b"test content")

        # Mock the prediction response
        respx_mock.post("/predictions").mock(return_value=httpx.Response(201, json=create_mock_prediction()))

        output: Any = self.client.run(self.model_ref, input={"file": file_obj}, file_encoding_strategy="base64")

        assert output == "test output"

    def test_run_with_prefer_conflict(self) -> None:
        """Test run with conflicting wait and prefer parameters."""
        # No respx marker: the TypeError is raised before any HTTP request.
        with pytest.raises(TypeError, match="cannot mix and match prefer and wait"):
            self.client.run(self.model_ref, wait=True, prefer="nowait", input={"prompt": "test"})

    @pytest.mark.respx(base_url=base_url)
    def test_run_with_iterator(self, respx_mock: MockRouter) -> None:
        """Test run with an iterator output."""
        # Create a mock prediction with an iterator output
        output_iterator = ["chunk1", "chunk2", "chunk3"]
        respx_mock.post("/predictions").mock(
            return_value=httpx.Response(201, json=create_mock_prediction(output=output_iterator))
        )

        output: list[str] = self.client.run(self.model_ref, input={"prompt": "generate iterator"})

        assert isinstance(output, list)
        assert len(output) == 3
        assert output == output_iterator

    @pytest.mark.respx(base_url=base_url)
    def test_run_with_invalid_identifier(self, respx_mock: MockRouter) -> None:
        """Test run with an invalid model identifier."""
        # Mock a 404 response for an invalid model identifier
        respx_mock.post("/predictions").mock(return_value=httpx.Response(404, json={"detail": "Model not found"}))

        with pytest.raises(NotFoundError):
            self.client.run("invalid/model:ref", input={"prompt": "test prompt"})

    @pytest.mark.respx(base_url=base_url)
    def test_run_with_invalid_cog_version(self, respx_mock: MockRouter) -> None:
        """Test run with an invalid Cog version."""
        # Mock an error response for an invalid Cog version
        respx_mock.post("/predictions").mock(return_value=httpx.Response(400, json={"detail": "Invalid Cog version"}))

        with pytest.raises(BadRequestError):
            self.client.run("invalid/cog:model", input={"prompt": "test prompt"})

    @pytest.mark.respx(base_url=base_url)
    def test_run_with_model_object(self, respx_mock: MockRouter) -> None:
        """Test run with Model object reference."""
        # Mock the models endpoint for owner/name lookup
        respx_mock.post("/models/test-owner/test-model/predictions").mock(
            return_value=httpx.Response(201, json=create_mock_prediction())
        )

        model = Model(owner="test-owner", name="test-model")
        output = self.client.run(model, input={"prompt": "test prompt"})

        assert output == "test output"

    @pytest.mark.respx(base_url=base_url)
    def test_run_with_version_object(self, respx_mock: MockRouter) -> None:
        """Test run with Version object reference."""
        # Version ID is used directly
        respx_mock.post("/predictions").mock(return_value=httpx.Response(201, json=create_mock_prediction()))

        version = _version_with_schema("test-version-id")
        output = self.client.run(version, input={"prompt": "test prompt"})

        assert output == "test output"

    @pytest.mark.respx(base_url=base_url)
    def test_run_with_model_version_identifier(self, respx_mock: MockRouter) -> None:
        """Test run with ModelVersionIdentifier dict reference."""
        # Case where version ID is provided
        respx_mock.post("/predictions").mock(return_value=httpx.Response(201, json=create_mock_prediction()))

        identifier = ModelVersionIdentifier(owner="test-owner", name="test-model", version="test-version-id")
        output = self.client.run(identifier, input={"prompt": "test prompt"})

        assert output == "test output"

    @pytest.mark.respx(base_url=base_url)
    def test_run_with_file_output_iterator(self, respx_mock: MockRouter) -> None:
        """Test run with file output iterator."""
        # Mock URLs for file outputs
        file_urls = [
            "https://replicate.delivery/output1.png",
            "https://replicate.delivery/output2.png",
            "https://replicate.delivery/output3.png",
        ]

        # Initial response with processing status and no output
        respx_mock.post("/predictions").mock(
            return_value=httpx.Response(201, json=create_mock_prediction(status="processing", output=None))
        )

        # First poll returns still processing
        respx_mock.get("/predictions/test_prediction_id").mock(
            return_value=httpx.Response(200, json=create_mock_prediction(status="processing", output=None))
        )

        # Second poll returns success with file URLs
        # NOTE(review): this re-registers a mock for the same GET route as
        # above; whether respx keeps both responses or the second replaces the
        # first depends on respx route-dedup semantics -- confirm the
        # "processing" poll response is ever actually served.
        respx_mock.get("/predictions/test_prediction_id").mock(
            return_value=httpx.Response(200, json=create_mock_prediction(output=file_urls))
        )

        output: list[FileOutput] = self.client.run(
            self.model_ref, use_file_output=True, wait=False, input={"prompt": "generate file iterator"}
        )

        assert isinstance(output, list)
        assert len(output) == 3
        assert all(isinstance(item, FileOutput) for item in output)
        assert [item.url for item in output] == file_urls
+
+
+class TestAsyncRun:
+ client = AsyncReplicate(base_url=base_url, bearer_token=bearer_token, _strict_response_validation=True)
+
+ # Common model reference format that will work with the new SDK
+ model_ref = "owner/name:version"
+
    # The following cases mirror TestRun's sync tests using AsyncReplicate.
    @pytest.mark.respx(base_url=base_url)
    async def test_async_run_basic(self, respx_mock: MockRouter) -> None:
        """Test basic async model run functionality."""
        respx_mock.post("/predictions").mock(return_value=httpx.Response(201, json=create_mock_prediction()))

        output: Any = await self.client.run(self.model_ref, input={"prompt": "test prompt"})

        assert output == "test output"

    @pytest.mark.respx(base_url=base_url)
    async def test_async_run_with_wait_true(self, respx_mock: MockRouter) -> None:
        """Test async run with wait=True parameter."""
        respx_mock.post("/predictions").mock(return_value=httpx.Response(201, json=create_mock_prediction()))

        output: Any = await self.client.run(self.model_ref, wait=True, input={"prompt": "test prompt"})

        assert output == "test output"

    @pytest.mark.respx(base_url=base_url)
    async def test_async_run_with_wait_int(self, respx_mock: MockRouter) -> None:
        """Test async run with wait as an integer value."""
        respx_mock.post("/predictions").mock(return_value=httpx.Response(201, json=create_mock_prediction()))

        output: Any = await self.client.run(self.model_ref, wait=10, input={"prompt": "test prompt"})

        assert output == "test output"
+
    @pytest.mark.respx(base_url=base_url)
    async def test_async_run_without_wait(self, respx_mock: MockRouter) -> None:
        """Test async run with wait=False parameter."""
        # Initial prediction state is "processing"
        respx_mock.post("/predictions").mock(
            return_value=httpx.Response(201, json=create_mock_prediction(status="processing"))
        )

        # When we wait for it, it becomes "succeeded"
        respx_mock.get("/predictions/test_prediction_id").mock(
            return_value=httpx.Response(200, json=create_mock_prediction(status="succeeded"))
        )

        output: Any = await self.client.run(self.model_ref, wait=False, input={"prompt": "test prompt"})

        assert output == "test output"

    # assert_all_mocked=False: presumably so touching the (never-fetched)
    # delivery URL does not fail the mock router -- TODO confirm.
    @pytest.mark.respx(base_url=base_url, assert_all_mocked=False)
    async def test_async_run_with_file_output(self, respx_mock: MockRouter) -> None:
        """Test async run with file output."""
        # Mock prediction with file URL output
        file_url = "https://replicate.delivery/output.png"
        respx_mock.post("/predictions").mock(
            return_value=httpx.Response(201, json=create_mock_prediction(output=file_url))
        )

        output: Any = await self.client.run(self.model_ref, input={"prompt": "generate image"})

        assert isinstance(output, AsyncFileOutput)
        assert output.url == file_url
+
    @pytest.mark.respx(base_url=base_url)
    async def test_async_run_with_data_uri_output(self, respx_mock: MockRouter) -> None:
        """Test async run with data URI output."""
        # Create a data URI for a small PNG image (1x1 transparent pixel)
        data_uri = "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAQAAAC1HAwCAAAAC0lEQVR42mNkYAAAAAYAAjCB0C8AAAAASUVORK5CYII="

        # Mock prediction with data URI output
        respx_mock.post("/predictions").mock(
            return_value=httpx.Response(201, json=create_mock_prediction(output=data_uri))
        )

        # Use a valid model version ID format
        output: Any = await self.client.run("owner/name:version", input={"prompt": "generate small image"})

        assert isinstance(output, AsyncFileOutput)
        assert output.url == data_uri

        # Test that we can read the data asynchronously
        image_data = await output.read()
        assert isinstance(image_data, bytes)
        assert len(image_data) > 0

        # Test that we can iterate over the data asynchronously
        chunks: List[Any] = []
        async for chunk in output:
            chunks.append(chunk)

        assert len(chunks) == 1
        assert chunks[0] == image_data

    @pytest.mark.respx(base_url=base_url)
    async def test_async_run_with_file_list_output(self, respx_mock: MockRouter) -> None:
        """Test async run with list of file outputs."""
        # Create a mock prediction response with a list of file URLs
        file_urls = ["https://replicate.delivery/output1.png", "https://replicate.delivery/output2.png"]
        mock_prediction = create_mock_prediction()
        mock_prediction["output"] = file_urls

        # Mock the endpoint
        respx_mock.post("/predictions").mock(return_value=httpx.Response(201, json=mock_prediction))

        # NOTE(review): unlike the sync variant, use_file_output is not passed
        # here -- presumably it defaults to True; confirm against run()'s
        # signature.
        output: list[AsyncFileOutput] = await self.client.run(
            self.model_ref, input={"prompt": "generate multiple images"}
        )

        assert isinstance(output, list)
        assert len(output) == 2
        assert all(isinstance(item, AsyncFileOutput) for item in output)
+
+ @pytest.mark.respx(base_url=base_url)
+ async def test_async_run_with_dict_file_output(self, respx_mock: MockRouter) -> None:
+ """Test async run with dictionary of file outputs."""
+ # Mock prediction with dict of file URLs
+ file_urls = {
+ "image1": "https://replicate.delivery/output1.png",
+ "image2": "https://replicate.delivery/output2.png",
+ }
+ respx_mock.post("/predictions").mock(
+ return_value=httpx.Response(201, json=create_mock_prediction(output=file_urls))
+ )
+
+ output: Dict[str, AsyncFileOutput] = await self.client.run(
+ self.model_ref, input={"prompt": "structured output"}
+ )
+
+ assert isinstance(output, dict)
+ assert len(output) == 2
+ assert all(isinstance(item, AsyncFileOutput) for item in output.values())
+
+    @pytest.mark.respx(base_url=base_url)
+    async def test_async_run_with_error(self, respx_mock: MockRouter) -> None:
+        """A prediction that completes with status "failed" raises ModelError."""
+        # Stub a prediction that finished in the "failed" state with an error message.
+        respx_mock.post("/predictions").mock(
+            return_value=httpx.Response(201, json=create_mock_prediction(status="failed", error="Model error occurred"))
+        )
+
+        with pytest.raises(ModelError):
+            await self.client.run(self.model_ref, input={"prompt": "trigger error"})
+
+    @pytest.mark.respx(base_url=base_url)
+    async def test_async_run_with_base64_file(self, respx_mock: MockRouter) -> None:
+        """File inputs are accepted with file_encoding_strategy='base64'."""
+        # In-memory file object to be encoded into the request as base64 data.
+        file_obj = io.BytesIO(b"test content")
+
+        # Stub the prediction-create endpoint with a default successful prediction.
+        respx_mock.post("/predictions").mock(return_value=httpx.Response(201, json=create_mock_prediction()))
+
+        output: Any = await self.client.run(self.model_ref, input={"file": file_obj}, file_encoding_strategy="base64")
+
+        assert output == "test output"
+
+    async def test_async_run_with_prefer_conflict(self) -> None:
+        """Passing both wait= and prefer= to run() is rejected with a TypeError."""
+        with pytest.raises(TypeError, match="cannot mix and match prefer and wait"):
+            await self.client.run(self.model_ref, wait=True, prefer="nowait", input={"prompt": "test"})
+
+    @pytest.mark.respx(base_url=base_url)
+    async def test_async_run_with_iterator(self, respx_mock: MockRouter) -> None:
+        """A list output of string chunks is passed through unchanged."""
+        # Prediction whose output is a list of chunks (as an iterator model would produce).
+        output_iterator = ["chunk1", "chunk2", "chunk3"]
+        respx_mock.post("/predictions").mock(
+            return_value=httpx.Response(201, json=create_mock_prediction(output=output_iterator))
+        )
+
+        output: list[str] = await self.client.run(self.model_ref, input={"prompt": "generate iterator"})
+
+        assert isinstance(output, list)
+        assert len(output) == 3
+        assert output == output_iterator
+
+    @pytest.mark.respx(base_url=base_url)
+    async def test_async_run_with_invalid_identifier(self, respx_mock: MockRouter) -> None:
+        """A 404 from the API surfaces as NotFoundError."""
+        # Stub the 404 response the API returns for an unknown model.
+        respx_mock.post("/predictions").mock(return_value=httpx.Response(404, json={"detail": "Model not found"}))
+
+        with pytest.raises(NotFoundError):
+            await self.client.run("invalid/model:ref", input={"prompt": "test prompt"})
+
+    @pytest.mark.respx(base_url=base_url)
+    async def test_async_run_with_invalid_cog_version(self, respx_mock: MockRouter) -> None:
+        """A 400 from the API surfaces as BadRequestError."""
+        # Stub the 400 response the API returns for an invalid Cog version.
+        respx_mock.post("/predictions").mock(return_value=httpx.Response(400, json={"detail": "Invalid Cog version"}))
+
+        with pytest.raises(BadRequestError):
+            await self.client.run("invalid/cog:model", input={"prompt": "test prompt"})
+
+    @pytest.mark.respx(base_url=base_url)
+    async def test_async_run_with_model_object(self, respx_mock: MockRouter) -> None:
+        """A Model object (no version) routes to the model-scoped predictions endpoint."""
+        # Without a version, the client posts to /models/{owner}/{name}/predictions.
+        respx_mock.post("/models/test-owner/test-model/predictions").mock(
+            return_value=httpx.Response(201, json=create_mock_prediction())
+        )
+
+        model = Model(owner="test-owner", name="test-model")
+        output = await self.client.run(model, input={"prompt": "test prompt"})
+
+        assert output == "test output"
+
+    @pytest.mark.respx(base_url=base_url)
+    async def test_async_run_with_version_object(self, respx_mock: MockRouter) -> None:
+        """A Version object routes to the generic /predictions endpoint."""
+        # The version's ID is used directly in the create-prediction request.
+        respx_mock.post("/predictions").mock(return_value=httpx.Response(201, json=create_mock_prediction()))
+
+        version = _version_with_schema("test-version-id")
+        output = await self.client.run(version, input={"prompt": "test prompt"})
+
+        assert output == "test output"
+
+    @pytest.mark.respx(base_url=base_url)
+    async def test_async_run_with_model_version_identifier(self, respx_mock: MockRouter) -> None:
+        """A ModelVersionIdentifier with an explicit version uses /predictions."""
+        # With a version ID present, the generic predictions endpoint is used.
+        respx_mock.post("/predictions").mock(return_value=httpx.Response(201, json=create_mock_prediction()))
+
+        identifier = ModelVersionIdentifier(owner="test-owner", name="test-model", version="test-version-id")
+        output = await self.client.run(identifier, input={"prompt": "test prompt"})
+
+        assert output == "test output"
+
+    @pytest.mark.respx(base_url=base_url)
+    async def test_async_run_with_file_output_iterator(self, respx_mock: MockRouter) -> None:
+        """A polled prediction whose final output is file URLs yields AsyncFileOutputs."""
+        # File URLs expected in the terminal prediction state.
+        file_urls = [
+            "https://replicate.delivery/output1.png",
+            "https://replicate.delivery/output2.png",
+            "https://replicate.delivery/output3.png",
+        ]
+
+        # Create returns a prediction still "processing" with no output yet.
+        respx_mock.post("/predictions").mock(
+            return_value=httpx.Response(201, json=create_mock_prediction(status="processing", output=None))
+        )
+
+        # NOTE(review): two routes share an identical pattern; respx does not queue them,
+        respx_mock.get("/predictions/test_prediction_id").mock(
+            return_value=httpx.Response(200, json=create_mock_prediction(status="processing", output=None))
+        )
+
+        # so the intermediate "processing" poll above may never be served — verify intent.
+        respx_mock.get("/predictions/test_prediction_id").mock(
+            return_value=httpx.Response(200, json=create_mock_prediction(output=file_urls))
+        )
+
+        output: list[AsyncFileOutput] = await self.client.run(
+            self.model_ref, use_file_output=True, wait=False, input={"prompt": "generate file iterator"}
+        )
+
+        assert isinstance(output, list)
+        assert len(output) == 3
+        assert all(isinstance(item, AsyncFileOutput) for item in output)
+        assert [item.url for item in output] == file_urls
+
+    @pytest.mark.respx(base_url=base_url)
+    async def test_async_run_concurrently(self, respx_mock: MockRouter) -> None:
+        """Multiple run() calls gathered concurrently each receive their own response."""
+        import asyncio
+
+        # One distinct output per concurrent request.
+        mock_outputs = ["output1", "output2", "output3"]
+        prompts = ["prompt1", "prompt2", "prompt3"]
+
+        # A single route with a side_effect list serves the queued responses in order;
+        # every POST to /predictions matches, consuming one response per call.
+        route = respx_mock.post("/predictions")
+        route.side_effect = [httpx.Response(201, json=create_mock_prediction(output=output)) for output in mock_outputs]
+
+        # Kick off three predictions concurrently.
+        tasks = [self.client.run("owner/name:version", input={"prompt": prompt}) for prompt in prompts]
+
+        results = await asyncio.gather(*tasks)
+
+        # asyncio.gather preserves task order, so results align with mock_outputs.
+        assert len(results) == 3
+        for i, result in enumerate(results):
+            assert result == mock_outputs[i]
diff --git a/uv.lock b/uv.lock
new file mode 100644
index 0000000..5ed937a
--- /dev/null
+++ b/uv.lock
@@ -0,0 +1,278 @@
+version = 1
+requires-python = ">=3.8"
+
+[[package]]
+name = "annotated-types"
+version = "0.7.0"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "typing-extensions", marker = "python_full_version < '3.9'" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/ee/67/531ea369ba64dcff5ec9c3402f9f51bf748cec26dde048a2f973a4eea7f5/annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89", size = 16081 }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/78/b6/6307fbef88d9b5ee7421e68d78a9f162e0da4900bc5f5793f6d3d0e34fb8/annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53", size = 13643 },
+]
+
+[[package]]
+name = "anyio"
+version = "4.5.2"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "exceptiongroup", marker = "python_full_version < '3.11'" },
+ { name = "idna" },
+ { name = "sniffio" },
+ { name = "typing-extensions", marker = "python_full_version < '3.11'" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/4d/f9/9a7ce600ebe7804daf90d4d48b1c0510a4561ddce43a596be46676f82343/anyio-4.5.2.tar.gz", hash = "sha256:23009af4ed04ce05991845451e11ef02fc7c5ed29179ac9a420e5ad0ac7ddc5b", size = 171293 }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/1b/b4/f7e396030e3b11394436358ca258a81d6010106582422f23443c16ca1873/anyio-4.5.2-py3-none-any.whl", hash = "sha256:c011ee36bc1e8ba40e5a81cb9df91925c218fe9b778554e0b56a21e1b5d4716f", size = 89766 },
+]
+
+[[package]]
+name = "asyncio"
+version = "3.4.3"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/da/54/054bafaf2c0fb8473d423743e191fcdf49b2c1fd5e9af3524efbe097bafd/asyncio-3.4.3.tar.gz", hash = "sha256:83360ff8bc97980e4ff25c964c7bd3923d333d177aa4f7fb736b019f26c7cb41", size = 204411 }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/22/74/07679c5b9f98a7cb0fc147b1ef1cc1853bc07a4eb9cb5731e24732c5f773/asyncio-3.4.3-py3-none-any.whl", hash = "sha256:c4d18b22701821de07bd6aea8b53d21449ec0ec5680645e5317062ea21817d2d", size = 101767 },
+]
+
+[[package]]
+name = "certifi"
+version = "2025.4.26"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/e8/9e/c05b3920a3b7d20d3d3310465f50348e5b3694f4f88c6daf736eef3024c4/certifi-2025.4.26.tar.gz", hash = "sha256:0a816057ea3cdefcef70270d2c515e4506bbc954f417fa5ade2021213bb8f0c6", size = 160705 }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/4a/7e/3db2bd1b1f9e95f7cddca6d6e75e2f2bd9f51b1246e546d88addca0106bd/certifi-2025.4.26-py3-none-any.whl", hash = "sha256:30350364dfe371162649852c63336a15c70c6510c2ad5015b21c2345311805f3", size = 159618 },
+]
+
+[[package]]
+name = "distro"
+version = "1.9.0"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/fc/f8/98eea607f65de6527f8a2e8885fc8015d3e6f5775df186e443e0964a11c3/distro-1.9.0.tar.gz", hash = "sha256:2fa77c6fd8940f116ee1d6b94a2f90b13b5ea8d019b98bc8bafdcabcdd9bdbed", size = 60722 }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/12/b3/231ffd4ab1fc9d679809f356cebee130ac7daa00d6d6f3206dd4fd137e9e/distro-1.9.0-py3-none-any.whl", hash = "sha256:7bffd925d65168f85027d8da9af6bddab658135b840670a223589bc0c8ef02b2", size = 20277 },
+]
+
+[[package]]
+name = "exceptiongroup"
+version = "1.2.2"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/09/35/2495c4ac46b980e4ca1f6ad6db102322ef3ad2410b79fdde159a4b0f3b92/exceptiongroup-1.2.2.tar.gz", hash = "sha256:47c2edf7c6738fafb49fd34290706d1a1a2f4d1c6df275526b62cbb4aa5393cc", size = 28883 }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/02/cc/b7e31358aac6ed1ef2bb790a9746ac2c69bcb3c8588b41616914eb106eaf/exceptiongroup-1.2.2-py3-none-any.whl", hash = "sha256:3111b9d131c238bec2f8f516e123e14ba243563fb135d3fe885990585aa7795b", size = 16453 },
+]
+
+[[package]]
+name = "h11"
+version = "0.16.0"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/01/ee/02a2c011bdab74c6fb3c75474d40b3052059d95df7e73351460c8588d963/h11-0.16.0.tar.gz", hash = "sha256:4e35b956cf45792e4caa5885e69fba00bdbc6ffafbfa020300e549b208ee5ff1", size = 101250 }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/04/4b/29cac41a4d98d144bf5f6d33995617b185d14b22401f75ca86f384e87ff1/h11-0.16.0-py3-none-any.whl", hash = "sha256:63cf8bbe7522de3bf65932fda1d9c2772064ffb3dae62d55932da54b31cb6c86", size = 37515 },
+]
+
+[[package]]
+name = "httpcore"
+version = "1.0.9"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "certifi" },
+ { name = "h11" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/06/94/82699a10bca87a5556c9c59b5963f2d039dbd239f25bc2a63907a05a14cb/httpcore-1.0.9.tar.gz", hash = "sha256:6e34463af53fd2ab5d807f399a9b45ea31c3dfa2276f15a2c3f00afff6e176e8", size = 85484 }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/7e/f5/f66802a942d491edb555dd61e3a9961140fd64c90bce1eafd741609d334d/httpcore-1.0.9-py3-none-any.whl", hash = "sha256:2d400746a40668fc9dec9810239072b40b4484b640a8c38fd654a024c7a1bf55", size = 78784 },
+]
+
+[[package]]
+name = "httpx"
+version = "0.28.1"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "anyio" },
+ { name = "certifi" },
+ { name = "httpcore" },
+ { name = "idna" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/b1/df/48c586a5fe32a0f01324ee087459e112ebb7224f646c0b5023f5e79e9956/httpx-0.28.1.tar.gz", hash = "sha256:75e98c5f16b0f35b567856f597f06ff2270a374470a5c2392242528e3e3e42fc", size = 141406 }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/2a/39/e50c7c3a983047577ee07d2a9e53faf5a69493943ec3f6a384bdc792deb2/httpx-0.28.1-py3-none-any.whl", hash = "sha256:d909fcccc110f8c7faf814ca82a9a4d816bc5a6dbfea25d6591d6985b8ba59ad", size = 73517 },
+]
+
+[[package]]
+name = "idna"
+version = "3.10"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/f1/70/7703c29685631f5a7590aa73f1f1d3fa9a380e654b86af429e0934a32f7d/idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9", size = 190490 }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/76/c6/c88e154df9c4e1a2a66ccf0005a88dfb2650c1dffb6f5ce603dfbd452ce3/idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3", size = 70442 },
+]
+
+[[package]]
+name = "pydantic"
+version = "2.10.6"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "annotated-types" },
+ { name = "pydantic-core" },
+ { name = "typing-extensions" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/b7/ae/d5220c5c52b158b1de7ca89fc5edb72f304a70a4c540c84c8844bf4008de/pydantic-2.10.6.tar.gz", hash = "sha256:ca5daa827cce33de7a42be142548b0096bf05a7e7b365aebfa5f8eeec7128236", size = 761681 }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/f4/3c/8cc1cc84deffa6e25d2d0c688ebb80635dfdbf1dbea3e30c541c8cf4d860/pydantic-2.10.6-py3-none-any.whl", hash = "sha256:427d664bf0b8a2b34ff5dd0f5a18df00591adcee7198fbd71981054cef37b584", size = 431696 },
+]
+
+[[package]]
+name = "pydantic-core"
+version = "2.27.2"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "typing-extensions" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/fc/01/f3e5ac5e7c25833db5eb555f7b7ab24cd6f8c322d3a3ad2d67a952dc0abc/pydantic_core-2.27.2.tar.gz", hash = "sha256:eb026e5a4c1fee05726072337ff51d1efb6f59090b7da90d30ea58625b1ffb39", size = 413443 }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/3a/bc/fed5f74b5d802cf9a03e83f60f18864e90e3aed7223adaca5ffb7a8d8d64/pydantic_core-2.27.2-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:2d367ca20b2f14095a8f4fa1210f5a7b78b8a20009ecced6b12818f455b1e9fa", size = 1895938 },
+ { url = "https://files.pythonhosted.org/packages/71/2a/185aff24ce844e39abb8dd680f4e959f0006944f4a8a0ea372d9f9ae2e53/pydantic_core-2.27.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:491a2b73db93fab69731eaee494f320faa4e093dbed776be1a829c2eb222c34c", size = 1815684 },
+ { url = "https://files.pythonhosted.org/packages/c3/43/fafabd3d94d159d4f1ed62e383e264f146a17dd4d48453319fd782e7979e/pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7969e133a6f183be60e9f6f56bfae753585680f3b7307a8e555a948d443cc05a", size = 1829169 },
+ { url = "https://files.pythonhosted.org/packages/a2/d1/f2dfe1a2a637ce6800b799aa086d079998959f6f1215eb4497966efd2274/pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:3de9961f2a346257caf0aa508a4da705467f53778e9ef6fe744c038119737ef5", size = 1867227 },
+ { url = "https://files.pythonhosted.org/packages/7d/39/e06fcbcc1c785daa3160ccf6c1c38fea31f5754b756e34b65f74e99780b5/pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e2bb4d3e5873c37bb3dd58714d4cd0b0e6238cebc4177ac8fe878f8b3aa8e74c", size = 2037695 },
+ { url = "https://files.pythonhosted.org/packages/7a/67/61291ee98e07f0650eb756d44998214231f50751ba7e13f4f325d95249ab/pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:280d219beebb0752699480fe8f1dc61ab6615c2046d76b7ab7ee38858de0a4e7", size = 2741662 },
+ { url = "https://files.pythonhosted.org/packages/32/90/3b15e31b88ca39e9e626630b4c4a1f5a0dfd09076366f4219429e6786076/pydantic_core-2.27.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:47956ae78b6422cbd46f772f1746799cbb862de838fd8d1fbd34a82e05b0983a", size = 1993370 },
+ { url = "https://files.pythonhosted.org/packages/ff/83/c06d333ee3a67e2e13e07794995c1535565132940715931c1c43bfc85b11/pydantic_core-2.27.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:14d4a5c49d2f009d62a2a7140d3064f686d17a5d1a268bc641954ba181880236", size = 1996813 },
+ { url = "https://files.pythonhosted.org/packages/7c/f7/89be1c8deb6e22618a74f0ca0d933fdcb8baa254753b26b25ad3acff8f74/pydantic_core-2.27.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:337b443af21d488716f8d0b6164de833e788aa6bd7e3a39c005febc1284f4962", size = 2005287 },
+ { url = "https://files.pythonhosted.org/packages/b7/7d/8eb3e23206c00ef7feee17b83a4ffa0a623eb1a9d382e56e4aa46fd15ff2/pydantic_core-2.27.2-cp310-cp310-musllinux_1_1_armv7l.whl", hash = "sha256:03d0f86ea3184a12f41a2d23f7ccb79cdb5a18e06993f8a45baa8dfec746f0e9", size = 2128414 },
+ { url = "https://files.pythonhosted.org/packages/4e/99/fe80f3ff8dd71a3ea15763878d464476e6cb0a2db95ff1c5c554133b6b83/pydantic_core-2.27.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:7041c36f5680c6e0f08d922aed302e98b3745d97fe1589db0a3eebf6624523af", size = 2155301 },
+ { url = "https://files.pythonhosted.org/packages/2b/a3/e50460b9a5789ca1451b70d4f52546fa9e2b420ba3bfa6100105c0559238/pydantic_core-2.27.2-cp310-cp310-win32.whl", hash = "sha256:50a68f3e3819077be2c98110c1f9dcb3817e93f267ba80a2c05bb4f8799e2ff4", size = 1816685 },
+ { url = "https://files.pythonhosted.org/packages/57/4c/a8838731cb0f2c2a39d3535376466de6049034d7b239c0202a64aaa05533/pydantic_core-2.27.2-cp310-cp310-win_amd64.whl", hash = "sha256:e0fd26b16394ead34a424eecf8a31a1f5137094cabe84a1bcb10fa6ba39d3d31", size = 1982876 },
+ { url = "https://files.pythonhosted.org/packages/c2/89/f3450af9d09d44eea1f2c369f49e8f181d742f28220f88cc4dfaae91ea6e/pydantic_core-2.27.2-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:8e10c99ef58cfdf2a66fc15d66b16c4a04f62bca39db589ae8cba08bc55331bc", size = 1893421 },
+ { url = "https://files.pythonhosted.org/packages/9e/e3/71fe85af2021f3f386da42d291412e5baf6ce7716bd7101ea49c810eda90/pydantic_core-2.27.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:26f32e0adf166a84d0cb63be85c562ca8a6fa8de28e5f0d92250c6b7e9e2aff7", size = 1814998 },
+ { url = "https://files.pythonhosted.org/packages/a6/3c/724039e0d848fd69dbf5806894e26479577316c6f0f112bacaf67aa889ac/pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8c19d1ea0673cd13cc2f872f6c9ab42acc4e4f492a7ca9d3795ce2b112dd7e15", size = 1826167 },
+ { url = "https://files.pythonhosted.org/packages/2b/5b/1b29e8c1fb5f3199a9a57c1452004ff39f494bbe9bdbe9a81e18172e40d3/pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5e68c4446fe0810e959cdff46ab0a41ce2f2c86d227d96dc3847af0ba7def306", size = 1865071 },
+ { url = "https://files.pythonhosted.org/packages/89/6c/3985203863d76bb7d7266e36970d7e3b6385148c18a68cc8915fd8c84d57/pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d9640b0059ff4f14d1f37321b94061c6db164fbe49b334b31643e0528d100d99", size = 2036244 },
+ { url = "https://files.pythonhosted.org/packages/0e/41/f15316858a246b5d723f7d7f599f79e37493b2e84bfc789e58d88c209f8a/pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:40d02e7d45c9f8af700f3452f329ead92da4c5f4317ca9b896de7ce7199ea459", size = 2737470 },
+ { url = "https://files.pythonhosted.org/packages/a8/7c/b860618c25678bbd6d1d99dbdfdf0510ccb50790099b963ff78a124b754f/pydantic_core-2.27.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1c1fd185014191700554795c99b347d64f2bb637966c4cfc16998a0ca700d048", size = 1992291 },
+ { url = "https://files.pythonhosted.org/packages/bf/73/42c3742a391eccbeab39f15213ecda3104ae8682ba3c0c28069fbcb8c10d/pydantic_core-2.27.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d81d2068e1c1228a565af076598f9e7451712700b673de8f502f0334f281387d", size = 1994613 },
+ { url = "https://files.pythonhosted.org/packages/94/7a/941e89096d1175d56f59340f3a8ebaf20762fef222c298ea96d36a6328c5/pydantic_core-2.27.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:1a4207639fb02ec2dbb76227d7c751a20b1a6b4bc52850568e52260cae64ca3b", size = 2002355 },
+ { url = "https://files.pythonhosted.org/packages/6e/95/2359937a73d49e336a5a19848713555605d4d8d6940c3ec6c6c0ca4dcf25/pydantic_core-2.27.2-cp311-cp311-musllinux_1_1_armv7l.whl", hash = "sha256:3de3ce3c9ddc8bbd88f6e0e304dea0e66d843ec9de1b0042b0911c1663ffd474", size = 2126661 },
+ { url = "https://files.pythonhosted.org/packages/2b/4c/ca02b7bdb6012a1adef21a50625b14f43ed4d11f1fc237f9d7490aa5078c/pydantic_core-2.27.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:30c5f68ded0c36466acede341551106821043e9afaad516adfb6e8fa80a4e6a6", size = 2153261 },
+ { url = "https://files.pythonhosted.org/packages/72/9d/a241db83f973049a1092a079272ffe2e3e82e98561ef6214ab53fe53b1c7/pydantic_core-2.27.2-cp311-cp311-win32.whl", hash = "sha256:c70c26d2c99f78b125a3459f8afe1aed4d9687c24fd677c6a4436bc042e50d6c", size = 1812361 },
+ { url = "https://files.pythonhosted.org/packages/e8/ef/013f07248041b74abd48a385e2110aa3a9bbfef0fbd97d4e6d07d2f5b89a/pydantic_core-2.27.2-cp311-cp311-win_amd64.whl", hash = "sha256:08e125dbdc505fa69ca7d9c499639ab6407cfa909214d500897d02afb816e7cc", size = 1982484 },
+ { url = "https://files.pythonhosted.org/packages/10/1c/16b3a3e3398fd29dca77cea0a1d998d6bde3902fa2706985191e2313cc76/pydantic_core-2.27.2-cp311-cp311-win_arm64.whl", hash = "sha256:26f0d68d4b235a2bae0c3fc585c585b4ecc51382db0e3ba402a22cbc440915e4", size = 1867102 },
+ { url = "https://files.pythonhosted.org/packages/d6/74/51c8a5482ca447871c93e142d9d4a92ead74de6c8dc5e66733e22c9bba89/pydantic_core-2.27.2-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:9e0c8cfefa0ef83b4da9588448b6d8d2a2bf1a53c3f1ae5fca39eb3061e2f0b0", size = 1893127 },
+ { url = "https://files.pythonhosted.org/packages/d3/f3/c97e80721735868313c58b89d2de85fa80fe8dfeeed84dc51598b92a135e/pydantic_core-2.27.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:83097677b8e3bd7eaa6775720ec8e0405f1575015a463285a92bfdfe254529ef", size = 1811340 },
+ { url = "https://files.pythonhosted.org/packages/9e/91/840ec1375e686dbae1bd80a9e46c26a1e0083e1186abc610efa3d9a36180/pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:172fce187655fece0c90d90a678424b013f8fbb0ca8b036ac266749c09438cb7", size = 1822900 },
+ { url = "https://files.pythonhosted.org/packages/f6/31/4240bc96025035500c18adc149aa6ffdf1a0062a4b525c932065ceb4d868/pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:519f29f5213271eeeeb3093f662ba2fd512b91c5f188f3bb7b27bc5973816934", size = 1869177 },
+ { url = "https://files.pythonhosted.org/packages/fa/20/02fbaadb7808be578317015c462655c317a77a7c8f0ef274bc016a784c54/pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:05e3a55d124407fffba0dd6b0c0cd056d10e983ceb4e5dbd10dda135c31071d6", size = 2038046 },
+ { url = "https://files.pythonhosted.org/packages/06/86/7f306b904e6c9eccf0668248b3f272090e49c275bc488a7b88b0823444a4/pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9c3ed807c7b91de05e63930188f19e921d1fe90de6b4f5cd43ee7fcc3525cb8c", size = 2685386 },
+ { url = "https://files.pythonhosted.org/packages/8d/f0/49129b27c43396581a635d8710dae54a791b17dfc50c70164866bbf865e3/pydantic_core-2.27.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6fb4aadc0b9a0c063206846d603b92030eb6f03069151a625667f982887153e2", size = 1997060 },
+ { url = "https://files.pythonhosted.org/packages/0d/0f/943b4af7cd416c477fd40b187036c4f89b416a33d3cc0ab7b82708a667aa/pydantic_core-2.27.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:28ccb213807e037460326424ceb8b5245acb88f32f3d2777427476e1b32c48c4", size = 2004870 },
+ { url = "https://files.pythonhosted.org/packages/35/40/aea70b5b1a63911c53a4c8117c0a828d6790483f858041f47bab0b779f44/pydantic_core-2.27.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:de3cd1899e2c279b140adde9357c4495ed9d47131b4a4eaff9052f23398076b3", size = 1999822 },
+ { url = "https://files.pythonhosted.org/packages/f2/b3/807b94fd337d58effc5498fd1a7a4d9d59af4133e83e32ae39a96fddec9d/pydantic_core-2.27.2-cp312-cp312-musllinux_1_1_armv7l.whl", hash = "sha256:220f892729375e2d736b97d0e51466252ad84c51857d4d15f5e9692f9ef12be4", size = 2130364 },
+ { url = "https://files.pythonhosted.org/packages/fc/df/791c827cd4ee6efd59248dca9369fb35e80a9484462c33c6649a8d02b565/pydantic_core-2.27.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:a0fcd29cd6b4e74fe8ddd2c90330fd8edf2e30cb52acda47f06dd615ae72da57", size = 2158303 },
+ { url = "https://files.pythonhosted.org/packages/9b/67/4e197c300976af185b7cef4c02203e175fb127e414125916bf1128b639a9/pydantic_core-2.27.2-cp312-cp312-win32.whl", hash = "sha256:1e2cb691ed9834cd6a8be61228471d0a503731abfb42f82458ff27be7b2186fc", size = 1834064 },
+ { url = "https://files.pythonhosted.org/packages/1f/ea/cd7209a889163b8dcca139fe32b9687dd05249161a3edda62860430457a5/pydantic_core-2.27.2-cp312-cp312-win_amd64.whl", hash = "sha256:cc3f1a99a4f4f9dd1de4fe0312c114e740b5ddead65bb4102884b384c15d8bc9", size = 1989046 },
+ { url = "https://files.pythonhosted.org/packages/bc/49/c54baab2f4658c26ac633d798dab66b4c3a9bbf47cff5284e9c182f4137a/pydantic_core-2.27.2-cp312-cp312-win_arm64.whl", hash = "sha256:3911ac9284cd8a1792d3cb26a2da18f3ca26c6908cc434a18f730dc0db7bfa3b", size = 1885092 },
+ { url = "https://files.pythonhosted.org/packages/41/b1/9bc383f48f8002f99104e3acff6cba1231b29ef76cfa45d1506a5cad1f84/pydantic_core-2.27.2-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:7d14bd329640e63852364c306f4d23eb744e0f8193148d4044dd3dacdaacbd8b", size = 1892709 },
+ { url = "https://files.pythonhosted.org/packages/10/6c/e62b8657b834f3eb2961b49ec8e301eb99946245e70bf42c8817350cbefc/pydantic_core-2.27.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:82f91663004eb8ed30ff478d77c4d1179b3563df6cdb15c0817cd1cdaf34d154", size = 1811273 },
+ { url = "https://files.pythonhosted.org/packages/ba/15/52cfe49c8c986e081b863b102d6b859d9defc63446b642ccbbb3742bf371/pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:71b24c7d61131bb83df10cc7e687433609963a944ccf45190cfc21e0887b08c9", size = 1823027 },
+ { url = "https://files.pythonhosted.org/packages/b1/1c/b6f402cfc18ec0024120602bdbcebc7bdd5b856528c013bd4d13865ca473/pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:fa8e459d4954f608fa26116118bb67f56b93b209c39b008277ace29937453dc9", size = 1868888 },
+ { url = "https://files.pythonhosted.org/packages/bd/7b/8cb75b66ac37bc2975a3b7de99f3c6f355fcc4d89820b61dffa8f1e81677/pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ce8918cbebc8da707ba805b7fd0b382816858728ae7fe19a942080c24e5b7cd1", size = 2037738 },
+ { url = "https://files.pythonhosted.org/packages/c8/f1/786d8fe78970a06f61df22cba58e365ce304bf9b9f46cc71c8c424e0c334/pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:eda3f5c2a021bbc5d976107bb302e0131351c2ba54343f8a496dc8783d3d3a6a", size = 2685138 },
+ { url = "https://files.pythonhosted.org/packages/a6/74/d12b2cd841d8724dc8ffb13fc5cef86566a53ed358103150209ecd5d1999/pydantic_core-2.27.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bd8086fa684c4775c27f03f062cbb9eaa6e17f064307e86b21b9e0abc9c0f02e", size = 1997025 },
+ { url = "https://files.pythonhosted.org/packages/a0/6e/940bcd631bc4d9a06c9539b51f070b66e8f370ed0933f392db6ff350d873/pydantic_core-2.27.2-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:8d9b3388db186ba0c099a6d20f0604a44eabdeef1777ddd94786cdae158729e4", size = 2004633 },
+ { url = "https://files.pythonhosted.org/packages/50/cc/a46b34f1708d82498c227d5d80ce615b2dd502ddcfd8376fc14a36655af1/pydantic_core-2.27.2-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:7a66efda2387de898c8f38c0cf7f14fca0b51a8ef0b24bfea5849f1b3c95af27", size = 1999404 },
+ { url = "https://files.pythonhosted.org/packages/ca/2d/c365cfa930ed23bc58c41463bae347d1005537dc8db79e998af8ba28d35e/pydantic_core-2.27.2-cp313-cp313-musllinux_1_1_armv7l.whl", hash = "sha256:18a101c168e4e092ab40dbc2503bdc0f62010e95d292b27827871dc85450d7ee", size = 2130130 },
+ { url = "https://files.pythonhosted.org/packages/f4/d7/eb64d015c350b7cdb371145b54d96c919d4db516817f31cd1c650cae3b21/pydantic_core-2.27.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:ba5dd002f88b78a4215ed2f8ddbdf85e8513382820ba15ad5ad8955ce0ca19a1", size = 2157946 },
+ { url = "https://files.pythonhosted.org/packages/a4/99/bddde3ddde76c03b65dfd5a66ab436c4e58ffc42927d4ff1198ffbf96f5f/pydantic_core-2.27.2-cp313-cp313-win32.whl", hash = "sha256:1ebaf1d0481914d004a573394f4be3a7616334be70261007e47c2a6fe7e50130", size = 1834387 },
+ { url = "https://files.pythonhosted.org/packages/71/47/82b5e846e01b26ac6f1893d3c5f9f3a2eb6ba79be26eef0b759b4fe72946/pydantic_core-2.27.2-cp313-cp313-win_amd64.whl", hash = "sha256:953101387ecf2f5652883208769a79e48db18c6df442568a0b5ccd8c2723abee", size = 1990453 },
+ { url = "https://files.pythonhosted.org/packages/51/b2/b2b50d5ecf21acf870190ae5d093602d95f66c9c31f9d5de6062eb329ad1/pydantic_core-2.27.2-cp313-cp313-win_arm64.whl", hash = "sha256:ac4dbfd1691affb8f48c2c13241a2e3b60ff23247cbcf981759c768b6633cf8b", size = 1885186 },
+ { url = "https://files.pythonhosted.org/packages/43/53/13e9917fc69c0a4aea06fd63ed6a8d6cda9cf140ca9584d49c1650b0ef5e/pydantic_core-2.27.2-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:d3e8d504bdd3f10835468f29008d72fc8359d95c9c415ce6e767203db6127506", size = 1899595 },
+ { url = "https://files.pythonhosted.org/packages/f4/20/26c549249769ed84877f862f7bb93f89a6ee08b4bee1ed8781616b7fbb5e/pydantic_core-2.27.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:521eb9b7f036c9b6187f0b47318ab0d7ca14bd87f776240b90b21c1f4f149320", size = 1775010 },
+ { url = "https://files.pythonhosted.org/packages/35/eb/8234e05452d92d2b102ffa1b56d801c3567e628fdc63f02080fdfc68fd5e/pydantic_core-2.27.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:85210c4d99a0114f5a9481b44560d7d1e35e32cc5634c656bc48e590b669b145", size = 1830727 },
+ { url = "https://files.pythonhosted.org/packages/8f/df/59f915c8b929d5f61e5a46accf748a87110ba145156f9326d1a7d28912b2/pydantic_core-2.27.2-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d716e2e30c6f140d7560ef1538953a5cd1a87264c737643d481f2779fc247fe1", size = 1868393 },
+ { url = "https://files.pythonhosted.org/packages/d5/52/81cf4071dca654d485c277c581db368b0c95b2b883f4d7b736ab54f72ddf/pydantic_core-2.27.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f66d89ba397d92f840f8654756196d93804278457b5fbede59598a1f9f90b228", size = 2040300 },
+ { url = "https://files.pythonhosted.org/packages/9c/00/05197ce1614f5c08d7a06e1d39d5d8e704dc81971b2719af134b844e2eaf/pydantic_core-2.27.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:669e193c1c576a58f132e3158f9dfa9662969edb1a250c54d8fa52590045f046", size = 2738785 },
+ { url = "https://files.pythonhosted.org/packages/f7/a3/5f19bc495793546825ab160e530330c2afcee2281c02b5ffafd0b32ac05e/pydantic_core-2.27.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9fdbe7629b996647b99c01b37f11170a57ae675375b14b8c13b8518b8320ced5", size = 1996493 },
+ { url = "https://files.pythonhosted.org/packages/ed/e8/e0102c2ec153dc3eed88aea03990e1b06cfbca532916b8a48173245afe60/pydantic_core-2.27.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d262606bf386a5ba0b0af3b97f37c83d7011439e3dc1a9298f21efb292e42f1a", size = 1998544 },
+ { url = "https://files.pythonhosted.org/packages/fb/a3/4be70845b555bd80aaee9f9812a7cf3df81550bce6dadb3cfee9c5d8421d/pydantic_core-2.27.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:cabb9bcb7e0d97f74df8646f34fc76fbf793b7f6dc2438517d7a9e50eee4f14d", size = 2007449 },
+ { url = "https://files.pythonhosted.org/packages/e3/9f/b779ed2480ba355c054e6d7ea77792467631d674b13d8257085a4bc7dcda/pydantic_core-2.27.2-cp38-cp38-musllinux_1_1_armv7l.whl", hash = "sha256:d2d63f1215638d28221f664596b1ccb3944f6e25dd18cd3b86b0a4c408d5ebb9", size = 2129460 },
+ { url = "https://files.pythonhosted.org/packages/a0/f0/a6ab0681f6e95260c7fbf552874af7302f2ea37b459f9b7f00698f875492/pydantic_core-2.27.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:bca101c00bff0adb45a833f8451b9105d9df18accb8743b08107d7ada14bd7da", size = 2159609 },
+ { url = "https://files.pythonhosted.org/packages/8a/2b/e1059506795104349712fbca647b18b3f4a7fd541c099e6259717441e1e0/pydantic_core-2.27.2-cp38-cp38-win32.whl", hash = "sha256:f6f8e111843bbb0dee4cb6594cdc73e79b3329b526037ec242a3e49012495b3b", size = 1819886 },
+ { url = "https://files.pythonhosted.org/packages/aa/6d/df49c17f024dfc58db0bacc7b03610058018dd2ea2eaf748ccbada4c3d06/pydantic_core-2.27.2-cp38-cp38-win_amd64.whl", hash = "sha256:fd1aea04935a508f62e0d0ef1f5ae968774a32afc306fb8545e06f5ff5cdf3ad", size = 1980773 },
+ { url = "https://files.pythonhosted.org/packages/27/97/3aef1ddb65c5ccd6eda9050036c956ff6ecbfe66cb7eb40f280f121a5bb0/pydantic_core-2.27.2-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:c10eb4f1659290b523af58fa7cffb452a61ad6ae5613404519aee4bfbf1df993", size = 1896475 },
+ { url = "https://files.pythonhosted.org/packages/ad/d3/5668da70e373c9904ed2f372cb52c0b996426f302e0dee2e65634c92007d/pydantic_core-2.27.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:ef592d4bad47296fb11f96cd7dc898b92e795032b4894dfb4076cfccd43a9308", size = 1772279 },
+ { url = "https://files.pythonhosted.org/packages/8a/9e/e44b8cb0edf04a2f0a1f6425a65ee089c1d6f9c4c2dcab0209127b6fdfc2/pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c61709a844acc6bf0b7dce7daae75195a10aac96a596ea1b776996414791ede4", size = 1829112 },
+ { url = "https://files.pythonhosted.org/packages/1c/90/1160d7ac700102effe11616e8119e268770f2a2aa5afb935f3ee6832987d/pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:42c5f762659e47fdb7b16956c71598292f60a03aa92f8b6351504359dbdba6cf", size = 1866780 },
+ { url = "https://files.pythonhosted.org/packages/ee/33/13983426df09a36d22c15980008f8d9c77674fc319351813b5a2739b70f3/pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4c9775e339e42e79ec99c441d9730fccf07414af63eac2f0e48e08fd38a64d76", size = 2037943 },
+ { url = "https://files.pythonhosted.org/packages/01/d7/ced164e376f6747e9158c89988c293cd524ab8d215ae4e185e9929655d5c/pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:57762139821c31847cfb2df63c12f725788bd9f04bc2fb392790959b8f70f118", size = 2740492 },
+ { url = "https://files.pythonhosted.org/packages/8b/1f/3dc6e769d5b7461040778816aab2b00422427bcaa4b56cc89e9c653b2605/pydantic_core-2.27.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0d1e85068e818c73e048fe28cfc769040bb1f475524f4745a5dc621f75ac7630", size = 1995714 },
+ { url = "https://files.pythonhosted.org/packages/07/d7/a0bd09bc39283530b3f7c27033a814ef254ba3bd0b5cfd040b7abf1fe5da/pydantic_core-2.27.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:097830ed52fd9e427942ff3b9bc17fab52913b2f50f2880dc4a5611446606a54", size = 1997163 },
+ { url = "https://files.pythonhosted.org/packages/2d/bb/2db4ad1762e1c5699d9b857eeb41959191980de6feb054e70f93085e1bcd/pydantic_core-2.27.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:044a50963a614ecfae59bb1eaf7ea7efc4bc62f49ed594e18fa1e5d953c40e9f", size = 2005217 },
+ { url = "https://files.pythonhosted.org/packages/53/5f/23a5a3e7b8403f8dd8fc8a6f8b49f6b55c7d715b77dcf1f8ae919eeb5628/pydantic_core-2.27.2-cp39-cp39-musllinux_1_1_armv7l.whl", hash = "sha256:4e0b4220ba5b40d727c7f879eac379b822eee5d8fff418e9d3381ee45b3b0362", size = 2127899 },
+ { url = "https://files.pythonhosted.org/packages/c2/ae/aa38bb8dd3d89c2f1d8362dd890ee8f3b967330821d03bbe08fa01ce3766/pydantic_core-2.27.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5e4f4bb20d75e9325cc9696c6802657b58bc1dbbe3022f32cc2b2b632c3fbb96", size = 2155726 },
+ { url = "https://files.pythonhosted.org/packages/98/61/4f784608cc9e98f70839187117ce840480f768fed5d386f924074bf6213c/pydantic_core-2.27.2-cp39-cp39-win32.whl", hash = "sha256:cca63613e90d001b9f2f9a9ceb276c308bfa2a43fafb75c8031c4f66039e8c6e", size = 1817219 },
+ { url = "https://files.pythonhosted.org/packages/57/82/bb16a68e4a1a858bb3768c2c8f1ff8d8978014e16598f001ea29a25bf1d1/pydantic_core-2.27.2-cp39-cp39-win_amd64.whl", hash = "sha256:77d1bca19b0f7021b3a982e6f903dcd5b2b06076def36a652e3907f596e29f67", size = 1985382 },
+ { url = "https://files.pythonhosted.org/packages/46/72/af70981a341500419e67d5cb45abe552a7c74b66326ac8877588488da1ac/pydantic_core-2.27.2-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:2bf14caea37e91198329b828eae1618c068dfb8ef17bb33287a7ad4b61ac314e", size = 1891159 },
+ { url = "https://files.pythonhosted.org/packages/ad/3d/c5913cccdef93e0a6a95c2d057d2c2cba347815c845cda79ddd3c0f5e17d/pydantic_core-2.27.2-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:b0cb791f5b45307caae8810c2023a184c74605ec3bcbb67d13846c28ff731ff8", size = 1768331 },
+ { url = "https://files.pythonhosted.org/packages/f6/f0/a3ae8fbee269e4934f14e2e0e00928f9346c5943174f2811193113e58252/pydantic_core-2.27.2-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:688d3fd9fcb71f41c4c015c023d12a79d1c4c0732ec9eb35d96e3388a120dcf3", size = 1822467 },
+ { url = "https://files.pythonhosted.org/packages/d7/7a/7bbf241a04e9f9ea24cd5874354a83526d639b02674648af3f350554276c/pydantic_core-2.27.2-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3d591580c34f4d731592f0e9fe40f9cc1b430d297eecc70b962e93c5c668f15f", size = 1979797 },
+ { url = "https://files.pythonhosted.org/packages/4f/5f/4784c6107731f89e0005a92ecb8a2efeafdb55eb992b8e9d0a2be5199335/pydantic_core-2.27.2-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:82f986faf4e644ffc189a7f1aafc86e46ef70372bb153e7001e8afccc6e54133", size = 1987839 },
+ { url = "https://files.pythonhosted.org/packages/6d/a7/61246562b651dff00de86a5f01b6e4befb518df314c54dec187a78d81c84/pydantic_core-2.27.2-pp310-pypy310_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:bec317a27290e2537f922639cafd54990551725fc844249e64c523301d0822fc", size = 1998861 },
+ { url = "https://files.pythonhosted.org/packages/86/aa/837821ecf0c022bbb74ca132e117c358321e72e7f9702d1b6a03758545e2/pydantic_core-2.27.2-pp310-pypy310_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:0296abcb83a797db256b773f45773da397da75a08f5fcaef41f2044adec05f50", size = 2116582 },
+ { url = "https://files.pythonhosted.org/packages/81/b0/5e74656e95623cbaa0a6278d16cf15e10a51f6002e3ec126541e95c29ea3/pydantic_core-2.27.2-pp310-pypy310_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:0d75070718e369e452075a6017fbf187f788e17ed67a3abd47fa934d001863d9", size = 2151985 },
+ { url = "https://files.pythonhosted.org/packages/63/37/3e32eeb2a451fddaa3898e2163746b0cffbbdbb4740d38372db0490d67f3/pydantic_core-2.27.2-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:7e17b560be3c98a8e3aa66ce828bdebb9e9ac6ad5466fba92eb74c4c95cb1151", size = 2004715 },
+ { url = "https://files.pythonhosted.org/packages/29/0e/dcaea00c9dbd0348b723cae82b0e0c122e0fa2b43fa933e1622fd237a3ee/pydantic_core-2.27.2-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:c33939a82924da9ed65dab5a65d427205a73181d8098e79b6b426bdf8ad4e656", size = 1891733 },
+ { url = "https://files.pythonhosted.org/packages/86/d3/e797bba8860ce650272bda6383a9d8cad1d1c9a75a640c9d0e848076f85e/pydantic_core-2.27.2-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:00bad2484fa6bda1e216e7345a798bd37c68fb2d97558edd584942aa41b7d278", size = 1768375 },
+ { url = "https://files.pythonhosted.org/packages/41/f7/f847b15fb14978ca2b30262548f5fc4872b2724e90f116393eb69008299d/pydantic_core-2.27.2-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c817e2b40aba42bac6f457498dacabc568c3b7a986fc9ba7c8d9d260b71485fb", size = 1822307 },
+ { url = "https://files.pythonhosted.org/packages/9c/63/ed80ec8255b587b2f108e514dc03eed1546cd00f0af281e699797f373f38/pydantic_core-2.27.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:251136cdad0cb722e93732cb45ca5299fb56e1344a833640bf93b2803f8d1bfd", size = 1979971 },
+ { url = "https://files.pythonhosted.org/packages/a9/6d/6d18308a45454a0de0e975d70171cadaf454bc7a0bf86b9c7688e313f0bb/pydantic_core-2.27.2-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d2088237af596f0a524d3afc39ab3b036e8adb054ee57cbb1dcf8e09da5b29cc", size = 1987616 },
+ { url = "https://files.pythonhosted.org/packages/82/8a/05f8780f2c1081b800a7ca54c1971e291c2d07d1a50fb23c7e4aef4ed403/pydantic_core-2.27.2-pp39-pypy39_pp73-musllinux_1_1_aarch64.whl", hash = "sha256:d4041c0b966a84b4ae7a09832eb691a35aec90910cd2dbe7a208de59be77965b", size = 1998943 },
+ { url = "https://files.pythonhosted.org/packages/5e/3e/fe5b6613d9e4c0038434396b46c5303f5ade871166900b357ada4766c5b7/pydantic_core-2.27.2-pp39-pypy39_pp73-musllinux_1_1_armv7l.whl", hash = "sha256:8083d4e875ebe0b864ffef72a4304827015cff328a1be6e22cc850753bfb122b", size = 2116654 },
+ { url = "https://files.pythonhosted.org/packages/db/ad/28869f58938fad8cc84739c4e592989730bfb69b7c90a8fff138dff18e1e/pydantic_core-2.27.2-pp39-pypy39_pp73-musllinux_1_1_x86_64.whl", hash = "sha256:f141ee28a0ad2123b6611b6ceff018039df17f32ada8b534e6aa039545a3efb2", size = 2152292 },
+ { url = "https://files.pythonhosted.org/packages/a1/0c/c5c5cd3689c32ed1fe8c5d234b079c12c281c051759770c05b8bed6412b5/pydantic_core-2.27.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:7d0c8399fcc1848491f00e0314bd59fb34a9c008761bcb422a057670c3f65e35", size = 2004961 },
+]
+
+[[package]]
+name = "replicate-stainless"
+version = "0.1.0a8"
+source = { editable = "." }
+dependencies = [
+ { name = "anyio" },
+ { name = "asyncio" },
+ { name = "distro" },
+ { name = "httpx" },
+ { name = "pydantic" },
+ { name = "sniffio" },
+ { name = "typing-extensions" },
+]
+
+[package.metadata]
+requires-dist = [
+ { name = "anyio", specifier = ">=3.5.0,<5" },
+ { name = "asyncio", specifier = ">=3.4.3" },
+ { name = "distro", specifier = ">=1.7.0,<2" },
+ { name = "httpx", specifier = ">=0.23.0,<1" },
+ { name = "pydantic", specifier = ">=1.9.0,<3" },
+ { name = "sniffio" },
+ { name = "typing-extensions", specifier = ">=4.10,<5" },
+]
+
+[[package]]
+name = "sniffio"
+version = "1.3.1"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/a2/87/a6771e1546d97e7e041b6ae58d80074f81b7d5121207425c964ddf5cfdbd/sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc", size = 20372 }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/e9/44/75a9c9421471a6c4805dbf2356f7c181a29c1879239abab1ea2cc8f38b40/sniffio-1.3.1-py3-none-any.whl", hash = "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2", size = 10235 },
+]
+
+[[package]]
+name = "typing-extensions"
+version = "4.13.2"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/f6/37/23083fcd6e35492953e8d2aaaa68b860eb422b34627b13f2ce3eb6106061/typing_extensions-4.13.2.tar.gz", hash = "sha256:e6c81219bd689f51865d9e372991c540bda33a0379d5573cddb9a3a23f7caaef", size = 106967 }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/8b/54/b1ae86c0973cc6f0210b53d508ca3641fb6d0c56823f288d108bc7ab3cc8/typing_extensions-4.13.2-py3-none-any.whl", hash = "sha256:a439e7c04b49fec3e5d3e2beaa21755cadbbdc391694e28ccdd36ca4a1408f8c", size = 45806 },
+]