diff --git a/.release-please-manifest.json b/.release-please-manifest.json
index f391d416..27d2fbbc 100644
--- a/.release-please-manifest.json
+++ b/.release-please-manifest.json
@@ -1,3 +1,3 @@
{
- ".": "3.6.0"
+ ".": "3.7.0"
}
\ No newline at end of file
diff --git a/.stats.yml b/.stats.yml
index af483f0a..425463f6 100644
--- a/.stats.yml
+++ b/.stats.yml
@@ -1,4 +1,4 @@
-configured_endpoints: 175
-openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradient-cb3bf9b21459cad24410206c27a32fd31ef6cf86711700597549dbbd0d634002.yml
-openapi_spec_hash: 6a9149a81ba15e7c5c5c1f4d77daad92
-config_hash: bad49c3bf949d5168ec3896bedff253a
+configured_endpoints: 188
+openapi_spec_url: https://storage.googleapis.com/stainless-sdk-openapi-specs/digitalocean%2Fgradient-f07d74847e620dfa26d8df40ea4680814af9bba381b3a57a7b6ed76ad49d85f8.yml
+openapi_spec_hash: e3553dc2abf2afd4368b736bcc32a289
+config_hash: b712366a70c9d33e22d40eb601ca972f
diff --git a/CHANGELOG.md b/CHANGELOG.md
index b8cc8f24..27493281 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,32 @@
# Changelog
+## 3.7.0 (2025-11-19)
+
+Full Changelog: [v3.6.0...v3.7.0](https://github.com/digitalocean/gradient-python/compare/v3.6.0...v3.7.0)
+
+### Features
+
+* add wait_for_completion method to IndexingJobs resource with sy… ([#49](https://github.com/digitalocean/gradient-python/issues/49)) ([9edc2a6](https://github.com/digitalocean/gradient-python/commit/9edc2a60f5aa49749e151477615bbecb3a79e92b))
+* Add wait_until_ready() method for agent deployment polling ([#56](https://github.com/digitalocean/gradient-python/issues/56)) ([dcef3d5](https://github.com/digitalocean/gradient-python/commit/dcef3d5ebb4ef903c0c91aa4008853bb978f5544))
+* **api:** add inference errors ([d61d495](https://github.com/digitalocean/gradient-python/commit/d61d4955f596d9ac1bebc9387a6573989e823022))
+* **api:** include indexing jobs ([d249d06](https://github.com/digitalocean/gradient-python/commit/d249d0606e26d585eb2b7859948a796ea7860f53))
+
+
+### Bug Fixes
+
+* **client:** close streams without requiring full consumption ([33fe04b](https://github.com/digitalocean/gradient-python/commit/33fe04b2e4ab71094ee13e7b83d4c04867e7d485))
+* compat with Python 3.14 ([add7b21](https://github.com/digitalocean/gradient-python/commit/add7b21b9fbb8987641d5520da638647fe27b159))
+* **compat:** update signatures of `model_dump` and `model_dump_json` for Pydantic v1 ([c945870](https://github.com/digitalocean/gradient-python/commit/c945870a31840d553cb1e3a75314f1c884a56060))
+
+
+### Chores
+
+* bump `httpx-aiohttp` version to 0.1.9 ([db39cc6](https://github.com/digitalocean/gradient-python/commit/db39cc63fb126ac81edfe2cb991493d10a2d0936))
+* **internal/tests:** avoid race condition with implicit client cleanup ([e0202bb](https://github.com/digitalocean/gradient-python/commit/e0202bb915613872095f7f223a49c4480e50be98))
+* **internal:** grammar fix (it's -> its) ([c6ffb3b](https://github.com/digitalocean/gradient-python/commit/c6ffb3becbcb99e36992934fac20d67a6a3b967c))
+* merge issues in test_client.py ([#87](https://github.com/digitalocean/gradient-python/issues/87)) ([62fc025](https://github.com/digitalocean/gradient-python/commit/62fc02512e941c6af18b11c19df8828cca31159d))
+* **package:** drop Python 3.8 support ([825b1e4](https://github.com/digitalocean/gradient-python/commit/825b1e4f8b257fc103c0d45743133bbc81ca3e10))
+
## 3.6.0 (2025-10-16)
Full Changelog: [v3.5.0...v3.6.0](https://github.com/digitalocean/gradient-python/compare/v3.5.0...v3.6.0)
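
A minimal sketch of the two headline polling/indexing additions above. It assumes auth is configured via environment variables, and that `wait_for_completion` takes the job uuid positionally plus a `timeout` in seconds (both names are inferred from the resource diff further down); the uuid is a placeholder.

```python
from gradient import Gradient

client = Gradient()  # assumes auth comes from environment variables

# Poll until the job reaches a terminal phase; raises IndexingJobTimeoutError
# (see the indexing_jobs diff below) if it is still running after `timeout` seconds.
client.knowledge_bases.indexing_jobs.wait_for_completion(
    "00000000-0000-0000-0000-000000000000",  # placeholder job uuid
    timeout=600,
)

# Also new in this release: a signed URL for the job's details.
signed = client.knowledge_bases.indexing_jobs.retrieve_signed_url(
    "00000000-0000-0000-0000-000000000000"  # placeholder job uuid
)
```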
diff --git a/README.md b/README.md
index c9186c03..a3029a83 100644
--- a/README.md
+++ b/README.md
@@ -6,7 +6,7 @@
[)](https://pypi.org/project/gradient/)
[](https://gradientai.digitalocean.com/getting-started/overview/)
-The Gradient Python library provides convenient access to the Gradient REST API from any Python 3.8+
+The Gradient Python library provides convenient access to the Gradient REST API from any Python 3.9+
application. The library includes type definitions for all request params and response fields,
and offers both synchronous and asynchronous clients powered by [httpx](https://github.com/encode/httpx).
@@ -528,7 +528,7 @@ print(gradient.__version__)
## Requirements
-Python 3.8 or higher.
+Python 3.9 or higher.
## Contributing
diff --git a/api.md b/api.md
index a2325441..49135772 100644
--- a/api.md
+++ b/api.md
@@ -243,6 +243,24 @@ Methods:
- client.agents.evaluation_metrics.oauth2.dropbox.create_tokens(\*\*params) -> DropboxCreateTokensResponse
+### ScheduledIndexing
+
+Types:
+
+```python
+from gradient.types.agents.evaluation_metrics import (
+ ScheduledIndexingCreateResponse,
+ ScheduledIndexingRetrieveResponse,
+ ScheduledIndexingDeleteResponse,
+)
+```
+
+Methods:
+
+- client.agents.evaluation_metrics.scheduled_indexing.create(\*\*params) -> ScheduledIndexingCreateResponse
+- client.agents.evaluation_metrics.scheduled_indexing.retrieve(knowledge_base_uuid) -> ScheduledIndexingRetrieveResponse
+- client.agents.evaluation_metrics.scheduled_indexing.delete(uuid) -> ScheduledIndexingDeleteResponse
+
## EvaluationRuns
Types:
@@ -825,6 +843,7 @@ from gradient.types import (
KnowledgeBaseUpdateResponse,
KnowledgeBaseListResponse,
KnowledgeBaseDeleteResponse,
+ KnowledgeBaseListIndexingJobsResponse,
)
```
@@ -835,6 +854,7 @@ Methods:
- client.knowledge_bases.update(path_uuid, \*\*params) -> KnowledgeBaseUpdateResponse
- client.knowledge_bases.list(\*\*params) -> KnowledgeBaseListResponse
- client.knowledge_bases.delete(uuid) -> KnowledgeBaseDeleteResponse
+- client.knowledge_bases.list_indexing_jobs(knowledge_base_uuid) -> KnowledgeBaseListIndexingJobsResponse
## DataSources
@@ -873,6 +893,7 @@ from gradient.types.knowledge_bases import (
IndexingJobRetrieveResponse,
IndexingJobListResponse,
IndexingJobRetrieveDataSourcesResponse,
+ IndexingJobRetrieveSignedURLResponse,
IndexingJobUpdateCancelResponse,
)
```
@@ -883,6 +904,7 @@ Methods:
- client.knowledge_bases.indexing_jobs.retrieve(uuid) -> IndexingJobRetrieveResponse
- client.knowledge_bases.indexing_jobs.list(\*\*params) -> IndexingJobListResponse
- client.knowledge_bases.indexing_jobs.retrieve_data_sources(indexing_job_uuid) -> IndexingJobRetrieveDataSourcesResponse
+- client.knowledge_bases.indexing_jobs.retrieve_signed_url(indexing_job_uuid) -> IndexingJobRetrieveSignedURLResponse
- client.knowledge_bases.indexing_jobs.update_cancel(path_uuid, \*\*params) -> IndexingJobUpdateCancelResponse
# Models
@@ -982,3 +1004,38 @@ Methods:
- client.databases.schema_registry.config.update(database_cluster_uuid, \*\*params) -> ConfigUpdateResponse
- client.databases.schema_registry.config.retrieve_subject(subject_name, \*, database_cluster_uuid) -> ConfigRetrieveSubjectResponse
- client.databases.schema_registry.config.update_subject(subject_name, \*, database_cluster_uuid, \*\*params) -> ConfigUpdateSubjectResponse
+
+# Nfs
+
+Types:
+
+```python
+from gradient.types import (
+ NfCreateResponse,
+ NfRetrieveResponse,
+ NfListResponse,
+ NfInitiateActionResponse,
+)
+```
+
+Methods:
+
+- client.nfs.create(\*\*params) -> NfCreateResponse
+- client.nfs.retrieve(nfs_id, \*\*params) -> NfRetrieveResponse
+- client.nfs.list(\*\*params) -> NfListResponse
+- client.nfs.delete(nfs_id, \*\*params) -> None
+- client.nfs.initiate_action(nfs_id, \*\*params) -> NfInitiateActionResponse
+
+## Snapshots
+
+Types:
+
+```python
+from gradient.types.nfs import SnapshotRetrieveResponse, SnapshotListResponse
+```
+
+Methods:
+
+- client.nfs.snapshots.retrieve(nfs_snapshot_id, \*\*params) -> SnapshotRetrieveResponse
+- client.nfs.snapshots.list(\*\*params) -> SnapshotListResponse
+- client.nfs.snapshots.delete(nfs_snapshot_id, \*\*params) -> None
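
A hedged sketch of the new NFS surface. Every request keyword shown (`name`, `region`, `size_gib`) is hypothetical; only the method shapes (create/retrieve/list/delete and the `snapshots` sub-resource) come from api.md above.

```python
from gradient import Gradient

client = Gradient()

# Keyword arguments below are hypothetical placeholders, not documented params.
share = client.nfs.create(name="my-share", region="nyc3", size_gib=100)

shares = client.nfs.list()
snapshots = client.nfs.snapshots.list()

client.nfs.delete("placeholder-nfs-id")  # returns None per api.md
```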
diff --git a/pyproject.toml b/pyproject.toml
index 0e83a25b..3832f578 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -1,6 +1,6 @@
[project]
name = "gradient"
-version = "3.6.0"
+version = "3.7.0"
description = "The official Python library for the Gradient API"
dynamic = ["readme"]
license = "Apache-2.0"
@@ -13,11 +13,10 @@ dependencies = [
"distro>=1.7.0, <2",
"sniffio",
]
-requires-python = ">= 3.8"
+requires-python = ">= 3.9"
classifiers = [
"Typing :: Typed",
"Intended Audience :: Developers",
- "Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3.11",
@@ -37,7 +36,7 @@ Homepage = "https://github.com/digitalocean/gradient-python"
Repository = "https://github.com/digitalocean/gradient-python"
[project.optional-dependencies]
-aiohttp = ["aiohttp", "httpx_aiohttp>=0.1.8"]
+aiohttp = ["aiohttp", "httpx_aiohttp>=0.1.9"]
[tool.rye]
managed = true
@@ -131,7 +130,7 @@ markers = [
# there are a couple of flags that are still disabled by
# default in strict mode as they are experimental and niche.
typeCheckingMode = "strict"
-pythonVersion = "3.8"
+pythonVersion = "3.9"
exclude = [
"_dev",
diff --git a/requirements-dev.lock b/requirements-dev.lock
index 896c8c3a..e5307af8 100644
--- a/requirements-dev.lock
+++ b/requirements-dev.lock
@@ -56,7 +56,7 @@ httpx==0.28.1
# via gradient
# via httpx-aiohttp
# via respx
-httpx-aiohttp==0.1.8
+httpx-aiohttp==0.1.9
# via gradient
idna==3.4
# via anyio
diff --git a/requirements.lock b/requirements.lock
index 1fce47a6..8c60e6c5 100644
--- a/requirements.lock
+++ b/requirements.lock
@@ -43,7 +43,7 @@ httpcore==1.0.9
httpx==0.28.1
# via gradient
# via httpx-aiohttp
-httpx-aiohttp==0.1.8
+httpx-aiohttp==0.1.9
# via gradient
idna==3.4
# via anyio
diff --git a/src/gradient/_client.py b/src/gradient/_client.py
index 58bc42d9..f7891ddf 100644
--- a/src/gradient/_client.py
+++ b/src/gradient/_client.py
@@ -33,6 +33,7 @@
if TYPE_CHECKING:
from .resources import (
+ nfs,
chat,
agents,
images,
@@ -44,6 +45,7 @@
knowledge_bases,
)
from .resources.images import ImagesResource, AsyncImagesResource
+ from .resources.nfs.nfs import NfsResource, AsyncNfsResource
from .resources.regions import RegionsResource, AsyncRegionsResource
from .resources.chat.chat import ChatResource, AsyncChatResource
from .resources.gpu_droplets import (
@@ -135,7 +137,10 @@ def __init__(
self._agent_endpoint = agent_endpoint
if inference_endpoint is None:
- inference_endpoint = os.environ.get("GRADIENT_INFERENCE_ENDPOINT") or "https://inference.do-ai.run"
+ inference_endpoint = (
+ os.environ.get("GRADIENT_INFERENCE_ENDPOINT")
+ or "https://inference.do-ai.run"
+ )
self.inference_endpoint = inference_endpoint
if base_url is None:
@@ -226,6 +231,12 @@ def databases(self) -> DatabasesResource:
return DatabasesResource(self)
+ @cached_property
+ def nfs(self) -> NfsResource:
+ from .resources.nfs import NfsResource
+
+ return NfsResource(self)
+
@cached_property
def with_raw_response(self) -> GradientWithRawResponse:
return GradientWithRawResponse(self)
@@ -276,7 +287,9 @@ def default_headers(self) -> dict[str, str | Omit]:
@override
def _validate_headers(self, headers: Headers, custom_headers: Headers) -> None:
- if (self.access_token or self.agent_access_key or self.model_access_key) and headers.get("Authorization"):
+ if (
+ self.access_token or self.agent_access_key or self.model_access_key
+ ) and headers.get("Authorization"):
return
if isinstance(custom_headers.get("Authorization"), Omit):
return
@@ -329,10 +342,14 @@ def copy(
Create a new client instance re-using the same options given to the current client with optional overriding.
"""
if default_headers is not None and set_default_headers is not None:
- raise ValueError("The `default_headers` and `set_default_headers` arguments are mutually exclusive")
+ raise ValueError(
+ "The `default_headers` and `set_default_headers` arguments are mutually exclusive"
+ )
if default_query is not None and set_default_query is not None:
- raise ValueError("The `default_query` and `set_default_query` arguments are mutually exclusive")
+ raise ValueError(
+ "The `default_query` and `set_default_query` arguments are mutually exclusive"
+ )
headers = self._custom_headers
if default_headers is not None:
@@ -382,10 +399,14 @@ def _make_status_error(
return _exceptions.BadRequestError(err_msg, response=response, body=body)
if response.status_code == 401:
- return _exceptions.AuthenticationError(err_msg, response=response, body=body)
+ return _exceptions.AuthenticationError(
+ err_msg, response=response, body=body
+ )
if response.status_code == 403:
- return _exceptions.PermissionDeniedError(err_msg, response=response, body=body)
+ return _exceptions.PermissionDeniedError(
+ err_msg, response=response, body=body
+ )
if response.status_code == 404:
return _exceptions.NotFoundError(err_msg, response=response, body=body)
@@ -394,13 +415,17 @@ def _make_status_error(
return _exceptions.ConflictError(err_msg, response=response, body=body)
if response.status_code == 422:
- return _exceptions.UnprocessableEntityError(err_msg, response=response, body=body)
+ return _exceptions.UnprocessableEntityError(
+ err_msg, response=response, body=body
+ )
if response.status_code == 429:
return _exceptions.RateLimitError(err_msg, response=response, body=body)
if response.status_code >= 500:
- return _exceptions.InternalServerError(err_msg, response=response, body=body)
+ return _exceptions.InternalServerError(
+ err_msg, response=response, body=body
+ )
return APIStatusError(err_msg, response=response, body=body)
@@ -468,7 +493,10 @@ def __init__(
self._agent_endpoint = agent_endpoint
if inference_endpoint is None:
- inference_endpoint = os.environ.get("GRADIENT_INFERENCE_ENDPOINT") or "https://inference.do-ai.run"
+ inference_endpoint = (
+ os.environ.get("GRADIENT_INFERENCE_ENDPOINT")
+ or "https://inference.do-ai.run"
+ )
self.inference_endpoint = inference_endpoint
if base_url is None:
@@ -559,6 +587,12 @@ def databases(self) -> AsyncDatabasesResource:
return AsyncDatabasesResource(self)
+ @cached_property
+ def nfs(self) -> AsyncNfsResource:
+ from .resources.nfs import AsyncNfsResource
+
+ return AsyncNfsResource(self)
+
@cached_property
def with_raw_response(self) -> AsyncGradientWithRawResponse:
return AsyncGradientWithRawResponse(self)
@@ -609,7 +643,9 @@ def default_headers(self) -> dict[str, str | Omit]:
@override
def _validate_headers(self, headers: Headers, custom_headers: Headers) -> None:
- if (self.access_token or self.agent_access_key or self.model_access_key) and headers.get("Authorization"):
+ if (
+ self.access_token or self.agent_access_key or self.model_access_key
+ ) and headers.get("Authorization"):
return
if isinstance(custom_headers.get("Authorization"), Omit):
return
@@ -662,10 +698,14 @@ def copy(
Create a new client instance re-using the same options given to the current client with optional overriding.
"""
if default_headers is not None and set_default_headers is not None:
- raise ValueError("The `default_headers` and `set_default_headers` arguments are mutually exclusive")
+ raise ValueError(
+ "The `default_headers` and `set_default_headers` arguments are mutually exclusive"
+ )
if default_query is not None and set_default_query is not None:
- raise ValueError("The `default_query` and `set_default_query` arguments are mutually exclusive")
+ raise ValueError(
+ "The `default_query` and `set_default_query` arguments are mutually exclusive"
+ )
headers = self._custom_headers
if default_headers is not None:
@@ -715,10 +755,14 @@ def _make_status_error(
return _exceptions.BadRequestError(err_msg, response=response, body=body)
if response.status_code == 401:
- return _exceptions.AuthenticationError(err_msg, response=response, body=body)
+ return _exceptions.AuthenticationError(
+ err_msg, response=response, body=body
+ )
if response.status_code == 403:
- return _exceptions.PermissionDeniedError(err_msg, response=response, body=body)
+ return _exceptions.PermissionDeniedError(
+ err_msg, response=response, body=body
+ )
if response.status_code == 404:
return _exceptions.NotFoundError(err_msg, response=response, body=body)
@@ -727,13 +771,17 @@ def _make_status_error(
return _exceptions.ConflictError(err_msg, response=response, body=body)
if response.status_code == 422:
- return _exceptions.UnprocessableEntityError(err_msg, response=response, body=body)
+ return _exceptions.UnprocessableEntityError(
+ err_msg, response=response, body=body
+ )
if response.status_code == 429:
return _exceptions.RateLimitError(err_msg, response=response, body=body)
if response.status_code >= 500:
- return _exceptions.InternalServerError(err_msg, response=response, body=body)
+ return _exceptions.InternalServerError(
+ err_msg, response=response, body=body
+ )
return APIStatusError(err_msg, response=response, body=body)
@@ -797,6 +845,12 @@ def databases(self) -> databases.DatabasesResourceWithRawResponse:
return DatabasesResourceWithRawResponse(self._client.databases)
+ @cached_property
+ def nfs(self) -> nfs.NfsResourceWithRawResponse:
+ from .resources.nfs import NfsResourceWithRawResponse
+
+ return NfsResourceWithRawResponse(self._client.nfs)
+
class AsyncGradientWithRawResponse:
_client: AsyncGradient
@@ -862,6 +916,12 @@ def databases(self) -> databases.AsyncDatabasesResourceWithRawResponse:
return AsyncDatabasesResourceWithRawResponse(self._client.databases)
+ @cached_property
+ def nfs(self) -> nfs.AsyncNfsResourceWithRawResponse:
+ from .resources.nfs import AsyncNfsResourceWithRawResponse
+
+ return AsyncNfsResourceWithRawResponse(self._client.nfs)
+
class GradientWithStreamedResponse:
_client: Gradient
@@ -927,6 +987,12 @@ def databases(self) -> databases.DatabasesResourceWithStreamingResponse:
return DatabasesResourceWithStreamingResponse(self._client.databases)
+ @cached_property
+ def nfs(self) -> nfs.NfsResourceWithStreamingResponse:
+ from .resources.nfs import NfsResourceWithStreamingResponse
+
+ return NfsResourceWithStreamingResponse(self._client.nfs)
+
class AsyncGradientWithStreamedResponse:
_client: AsyncGradient
@@ -976,7 +1042,9 @@ def knowledge_bases(
AsyncKnowledgeBasesResourceWithStreamingResponse,
)
- return AsyncKnowledgeBasesResourceWithStreamingResponse(self._client.knowledge_bases)
+ return AsyncKnowledgeBasesResourceWithStreamingResponse(
+ self._client.knowledge_bases
+ )
@cached_property
def models(self) -> models.AsyncModelsResourceWithStreamingResponse:
@@ -996,6 +1064,12 @@ def databases(self) -> databases.AsyncDatabasesResourceWithStreamingResponse:
return AsyncDatabasesResourceWithStreamingResponse(self._client.databases)
+ @cached_property
+ def nfs(self) -> nfs.AsyncNfsResourceWithStreamingResponse:
+ from .resources.nfs import AsyncNfsResourceWithStreamingResponse
+
+ return AsyncNfsResourceWithStreamingResponse(self._client.nfs)
+
Client = Gradient
diff --git a/src/gradient/_models.py b/src/gradient/_models.py
index 6a3cd1d2..ca9500b2 100644
--- a/src/gradient/_models.py
+++ b/src/gradient/_models.py
@@ -2,6 +2,7 @@
import os
import inspect
+import weakref
from typing import TYPE_CHECKING, Any, Type, Union, Generic, TypeVar, Callable, Optional, cast
from datetime import date, datetime
from typing_extensions import (
@@ -256,15 +257,16 @@ def model_dump(
mode: Literal["json", "python"] | str = "python",
include: IncEx | None = None,
exclude: IncEx | None = None,
+ context: Any | None = None,
by_alias: bool | None = None,
exclude_unset: bool = False,
exclude_defaults: bool = False,
exclude_none: bool = False,
+ exclude_computed_fields: bool = False,
round_trip: bool = False,
warnings: bool | Literal["none", "warn", "error"] = True,
- context: dict[str, Any] | None = None,
- serialize_as_any: bool = False,
fallback: Callable[[Any], Any] | None = None,
+ serialize_as_any: bool = False,
) -> dict[str, Any]:
"""Usage docs: https://docs.pydantic.dev/2.4/concepts/serialization/#modelmodel_dump
@@ -272,16 +274,24 @@ def model_dump(
Args:
mode: The mode in which `to_python` should run.
- If mode is 'json', the dictionary will only contain JSON serializable types.
- If mode is 'python', the dictionary may contain any Python objects.
- include: A list of fields to include in the output.
- exclude: A list of fields to exclude from the output.
+ If mode is 'json', the output will only contain JSON serializable types.
+ If mode is 'python', the output may contain non-JSON-serializable Python objects.
+ include: A set of fields to include in the output.
+ exclude: A set of fields to exclude from the output.
+ context: Additional context to pass to the serializer.
by_alias: Whether to use the field's alias in the dictionary key if defined.
- exclude_unset: Whether to exclude fields that are unset or None from the output.
- exclude_defaults: Whether to exclude fields that are set to their default value from the output.
- exclude_none: Whether to exclude fields that have a value of `None` from the output.
- round_trip: Whether to enable serialization and deserialization round-trip support.
- warnings: Whether to log warnings when invalid fields are encountered.
+ exclude_unset: Whether to exclude fields that have not been explicitly set.
+ exclude_defaults: Whether to exclude fields that are set to their default value.
+ exclude_none: Whether to exclude fields that have a value of `None`.
+ exclude_computed_fields: Whether to exclude computed fields.
+ While this can be useful for round-tripping, it is usually recommended to use the dedicated
+ `round_trip` parameter instead.
+ round_trip: If True, dumped values should be valid as input for non-idempotent types such as Json[T].
+ warnings: How to handle serialization errors. False/"none" ignores them, True/"warn" logs errors,
+ "error" raises a [`PydanticSerializationError`][pydantic_core.PydanticSerializationError].
+ fallback: A function to call when an unknown value is encountered. If not provided,
+ a [`PydanticSerializationError`][pydantic_core.PydanticSerializationError] error is raised.
+ serialize_as_any: Whether to serialize fields with duck-typing serialization behavior.
Returns:
A dictionary representation of the model.
@@ -298,6 +308,8 @@ def model_dump(
raise ValueError("serialize_as_any is only supported in Pydantic v2")
if fallback is not None:
raise ValueError("fallback is only supported in Pydantic v2")
+ if exclude_computed_fields != False:
+ raise ValueError("exclude_computed_fields is only supported in Pydantic v2")
dumped = super().dict( # pyright: ignore[reportDeprecated]
include=include,
exclude=exclude,
@@ -314,15 +326,17 @@ def model_dump_json(
self,
*,
indent: int | None = None,
+ ensure_ascii: bool = False,
include: IncEx | None = None,
exclude: IncEx | None = None,
+ context: Any | None = None,
by_alias: bool | None = None,
exclude_unset: bool = False,
exclude_defaults: bool = False,
exclude_none: bool = False,
+ exclude_computed_fields: bool = False,
round_trip: bool = False,
warnings: bool | Literal["none", "warn", "error"] = True,
- context: dict[str, Any] | None = None,
fallback: Callable[[Any], Any] | None = None,
serialize_as_any: bool = False,
) -> str:
@@ -354,6 +368,10 @@ def model_dump_json(
raise ValueError("serialize_as_any is only supported in Pydantic v2")
if fallback is not None:
raise ValueError("fallback is only supported in Pydantic v2")
+ if ensure_ascii != False:
+ raise ValueError("ensure_ascii is only supported in Pydantic v2")
+ if exclude_computed_fields != False:
+ raise ValueError("exclude_computed_fields is only supported in Pydantic v2")
return super().json( # type: ignore[reportDeprecated]
indent=indent,
include=include,
@@ -573,6 +591,9 @@ class CachedDiscriminatorType(Protocol):
__discriminator__: DiscriminatorDetails
+DISCRIMINATOR_CACHE: weakref.WeakKeyDictionary[type, DiscriminatorDetails] = weakref.WeakKeyDictionary()
+
+
class DiscriminatorDetails:
field_name: str
"""The name of the discriminator field in the variant class, e.g.
@@ -615,8 +636,9 @@ def __init__(
def _build_discriminated_union_meta(*, union: type, meta_annotations: tuple[Any, ...]) -> DiscriminatorDetails | None:
- if isinstance(union, CachedDiscriminatorType):
- return union.__discriminator__
+ cached = DISCRIMINATOR_CACHE.get(union)
+ if cached is not None:
+ return cached
discriminator_field_name: str | None = None
@@ -669,7 +691,7 @@ def _build_discriminated_union_meta(*, union: type, meta_annotations: tuple[Any,
discriminator_field=discriminator_field_name,
discriminator_alias=discriminator_alias,
)
- cast(CachedDiscriminatorType, union).__discriminator__ = details
+ DISCRIMINATOR_CACHE.setdefault(union, details)
return details
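
The discriminator cache now lives in a `weakref.WeakKeyDictionary` keyed on the union type instead of being written onto the type as `__discriminator__`. A standalone sketch of the design choice: entries disappear once the keyed type is garbage collected, so caching never pins dynamically created types in memory.

```python
import gc
import weakref

cache: "weakref.WeakKeyDictionary[type, str]" = weakref.WeakKeyDictionary()

def build_and_cache() -> None:
    # A throwaway type standing in for a discriminated-union annotation.
    dynamic = type("Dynamic", (), {})
    cache[dynamic] = "discriminator details"
    assert cache[dynamic] == "discriminator details"

build_and_cache()
gc.collect()  # the only reference to the throwaway type is gone
assert len(cache) == 0  # its cache entry went with it
```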
diff --git a/src/gradient/_streaming.py b/src/gradient/_streaming.py
index eb9be89d..df2a5870 100644
--- a/src/gradient/_streaming.py
+++ b/src/gradient/_streaming.py
@@ -76,9 +76,8 @@ def __stream__(self) -> Iterator[_T]:
yield process_data(data=data, cast_to=cast_to, response=response)
- # Ensure the entire stream is consumed
- for _sse in iterator:
- ...
+ # As we might not fully consume the response stream, we need to close it explicitly
+ response.close()
def __enter__(self) -> Self:
return self
@@ -158,9 +157,8 @@ async def __stream__(self) -> AsyncIterator[_T]:
yield process_data(data=data, cast_to=cast_to, response=response)
- # Ensure the entire stream is consumed
- async for _sse in iterator:
- ...
+ # As we might not fully consume the response stream, we need to close it explicitly
+ await response.aclose()
async def __aenter__(self) -> Self:
return self
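
The practical effect of the streaming fix: breaking out of a stream early no longer drains the remaining SSE events, because the context manager closes the HTTP response explicitly. A sketch assuming the usual `chat.completions.create(..., stream=True)` shape; the model slug is hypothetical.

```python
from gradient import Gradient

client = Gradient()

with client.chat.completions.create(
    model="llama3.3-70b-instruct",  # hypothetical model slug
    messages=[{"role": "user", "content": "Say hi"}],
    stream=True,
) as stream:
    for chunk in stream:
        print(chunk)
        break  # early exit: the response is now closed explicitly
        # rather than being consumed to the end as before
```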
diff --git a/src/gradient/_utils/_sync.py b/src/gradient/_utils/_sync.py
index ad7ec71b..f6027c18 100644
--- a/src/gradient/_utils/_sync.py
+++ b/src/gradient/_utils/_sync.py
@@ -1,10 +1,8 @@
from __future__ import annotations
-import sys
import asyncio
import functools
-import contextvars
-from typing import Any, TypeVar, Callable, Awaitable
+from typing import TypeVar, Callable, Awaitable
from typing_extensions import ParamSpec
import anyio
@@ -15,34 +13,11 @@
T_ParamSpec = ParamSpec("T_ParamSpec")
-if sys.version_info >= (3, 9):
- _asyncio_to_thread = asyncio.to_thread
-else:
- # backport of https://docs.python.org/3/library/asyncio-task.html#asyncio.to_thread
- # for Python 3.8 support
- async def _asyncio_to_thread(
- func: Callable[T_ParamSpec, T_Retval], /, *args: T_ParamSpec.args, **kwargs: T_ParamSpec.kwargs
- ) -> Any:
- """Asynchronously run function *func* in a separate thread.
-
- Any *args and **kwargs supplied for this function are directly passed
- to *func*. Also, the current :class:`contextvars.Context` is propagated,
- allowing context variables from the main thread to be accessed in the
- separate thread.
-
- Returns a coroutine that can be awaited to get the eventual result of *func*.
- """
- loop = asyncio.events.get_running_loop()
- ctx = contextvars.copy_context()
- func_call = functools.partial(ctx.run, func, *args, **kwargs)
- return await loop.run_in_executor(None, func_call)
-
-
async def to_thread(
func: Callable[T_ParamSpec, T_Retval], /, *args: T_ParamSpec.args, **kwargs: T_ParamSpec.kwargs
) -> T_Retval:
if sniffio.current_async_library() == "asyncio":
- return await _asyncio_to_thread(func, *args, **kwargs)
+ return await asyncio.to_thread(func, *args, **kwargs)
return await anyio.to_thread.run_sync(
functools.partial(func, *args, **kwargs),
@@ -53,10 +28,7 @@ async def to_thread(
def asyncify(function: Callable[T_ParamSpec, T_Retval]) -> Callable[T_ParamSpec, Awaitable[T_Retval]]:
"""
Take a blocking function and create an async one that receives the same
- positional and keyword arguments. For python version 3.9 and above, it uses
- asyncio.to_thread to run the function in a separate thread. For python version
- 3.8, it uses locally defined copy of the asyncio.to_thread function which was
- introduced in python 3.9.
+ positional and keyword arguments.
Usage:
diff --git a/src/gradient/_utils/_utils.py b/src/gradient/_utils/_utils.py
index 50d59269..eec7f4a1 100644
--- a/src/gradient/_utils/_utils.py
+++ b/src/gradient/_utils/_utils.py
@@ -133,7 +133,7 @@ def is_given(obj: _T | NotGiven | Omit) -> TypeGuard[_T]:
# Type safe methods for narrowing types with TypeVars.
# The default narrowing for isinstance(obj, dict) is dict[unknown, unknown],
# however this cause Pyright to rightfully report errors. As we know we don't
-# care about the contained types we can safely use `object` in it's place.
+# care about the contained types we can safely use `object` in its place.
#
# There are two separate functions defined, `is_*` and `is_*_t` for different use cases.
# `is_*` is for when you're dealing with an unknown input
diff --git a/src/gradient/_version.py b/src/gradient/_version.py
index 0190d688..6e29a000 100644
--- a/src/gradient/_version.py
+++ b/src/gradient/_version.py
@@ -1,4 +1,4 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
__title__ = "gradient"
-__version__ = "3.6.0" # x-release-please-version
+__version__ = "3.7.0" # x-release-please-version
diff --git a/src/gradient/resources/__init__.py b/src/gradient/resources/__init__.py
index fdc7d346..a797b18f 100644
--- a/src/gradient/resources/__init__.py
+++ b/src/gradient/resources/__init__.py
@@ -1,5 +1,13 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+from .nfs import (
+ NfsResource,
+ AsyncNfsResource,
+ NfsResourceWithRawResponse,
+ AsyncNfsResourceWithRawResponse,
+ NfsResourceWithStreamingResponse,
+ AsyncNfsResourceWithStreamingResponse,
+)
from .chat import (
ChatResource,
AsyncChatResource,
@@ -128,4 +136,10 @@
"AsyncDatabasesResourceWithRawResponse",
"DatabasesResourceWithStreamingResponse",
"AsyncDatabasesResourceWithStreamingResponse",
+ "NfsResource",
+ "AsyncNfsResource",
+ "NfsResourceWithRawResponse",
+ "AsyncNfsResourceWithRawResponse",
+ "NfsResourceWithStreamingResponse",
+ "AsyncNfsResourceWithStreamingResponse",
]
diff --git a/src/gradient/resources/agents/agents.py b/src/gradient/resources/agents/agents.py
index 686678ba..33a59788 100644
--- a/src/gradient/resources/agents/agents.py
+++ b/src/gradient/resources/agents/agents.py
@@ -185,12 +185,14 @@ def create(
description: str | Omit = omit,
instruction: str | Omit = omit,
knowledge_base_uuid: SequenceNotStr[str] | Omit = omit,
+ model_provider_key_uuid: str | Omit = omit,
model_uuid: str | Omit = omit,
name: str | Omit = omit,
openai_key_uuid: str | Omit = omit,
project_id: str | Omit = omit,
region: str | Omit = omit,
tags: SequenceNotStr[str] | Omit = omit,
+ workspace_uuid: str | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
@@ -227,6 +229,8 @@ def create(
tags: Agent tag to organize related resources
+ workspace_uuid: Identifier for the workspace
+
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
@@ -245,12 +249,14 @@ def create(
"description": description,
"instruction": instruction,
"knowledge_base_uuid": knowledge_base_uuid,
+ "model_provider_key_uuid": model_provider_key_uuid,
"model_uuid": model_uuid,
"name": name,
"openai_key_uuid": openai_key_uuid,
"project_id": project_id,
"region": region,
"tags": tags,
+ "workspace_uuid": workspace_uuid,
},
agent_create_params.AgentCreateParams,
),
@@ -302,12 +308,14 @@ def update(
path_uuid: str,
*,
agent_log_insights_enabled: bool | Omit = omit,
+ allowed_domains: SequenceNotStr[str] | Omit = omit,
anthropic_key_uuid: str | Omit = omit,
conversation_logs_enabled: bool | Omit = omit,
description: str | Omit = omit,
instruction: str | Omit = omit,
k: int | Omit = omit,
max_tokens: int | Omit = omit,
+ model_provider_key_uuid: str | Omit = omit,
model_uuid: str | Omit = omit,
name: str | Omit = omit,
openai_key_uuid: str | Omit = omit,
@@ -331,6 +339,9 @@ def update(
response body is a JSON object containing the agent.
Args:
+ allowed_domains: Optional list of allowed domains for the chatbot. Each must be a fully
+ qualified domain name (FQDN), such as https://example.com
+
anthropic_key_uuid: Optional anthropic key uuid for use with anthropic models
conversation_logs_enabled: Optional update of conversation logs enabled
@@ -348,6 +359,8 @@ def update(
or output, set as a number between 1 and 512. This determines the length of each
response.
+ model_provider_key_uuid: Optional Model Provider uuid for use with provider models
+
model_uuid: Identifier for the foundation model.
name: Agent name
@@ -391,12 +404,14 @@ def update(
body=maybe_transform(
{
"agent_log_insights_enabled": agent_log_insights_enabled,
+ "allowed_domains": allowed_domains,
"anthropic_key_uuid": anthropic_key_uuid,
"conversation_logs_enabled": conversation_logs_enabled,
"description": description,
"instruction": instruction,
"k": k,
"max_tokens": max_tokens,
+ "model_provider_key_uuid": model_provider_key_uuid,
"model_uuid": model_uuid,
"name": name,
"openai_key_uuid": openai_key_uuid,
@@ -767,12 +782,14 @@ async def create(
description: str | Omit = omit,
instruction: str | Omit = omit,
knowledge_base_uuid: SequenceNotStr[str] | Omit = omit,
+ model_provider_key_uuid: str | Omit = omit,
model_uuid: str | Omit = omit,
name: str | Omit = omit,
openai_key_uuid: str | Omit = omit,
project_id: str | Omit = omit,
region: str | Omit = omit,
tags: SequenceNotStr[str] | Omit = omit,
+ workspace_uuid: str | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
@@ -809,6 +826,8 @@ async def create(
tags: Agent tag to organize related resources
+ workspace_uuid: Identifier for the workspace
+
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
@@ -827,12 +846,14 @@ async def create(
"description": description,
"instruction": instruction,
"knowledge_base_uuid": knowledge_base_uuid,
+ "model_provider_key_uuid": model_provider_key_uuid,
"model_uuid": model_uuid,
"name": name,
"openai_key_uuid": openai_key_uuid,
"project_id": project_id,
"region": region,
"tags": tags,
+ "workspace_uuid": workspace_uuid,
},
agent_create_params.AgentCreateParams,
),
@@ -884,12 +905,14 @@ async def update(
path_uuid: str,
*,
agent_log_insights_enabled: bool | Omit = omit,
+ allowed_domains: SequenceNotStr[str] | Omit = omit,
anthropic_key_uuid: str | Omit = omit,
conversation_logs_enabled: bool | Omit = omit,
description: str | Omit = omit,
instruction: str | Omit = omit,
k: int | Omit = omit,
max_tokens: int | Omit = omit,
+ model_provider_key_uuid: str | Omit = omit,
model_uuid: str | Omit = omit,
name: str | Omit = omit,
openai_key_uuid: str | Omit = omit,
@@ -913,6 +936,9 @@ async def update(
response body is a JSON object containing the agent.
Args:
+ allowed_domains: Optional list of allowed domains for the chatbot. Each must be a fully
+ qualified domain name (FQDN), such as https://example.com
+
anthropic_key_uuid: Optional anthropic key uuid for use with anthropic models
conversation_logs_enabled: Optional update of conversation logs enabled
@@ -930,6 +956,8 @@ async def update(
or output, set as a number between 1 and 512. This determines the length of each
response.
+ model_provider_key_uuid: Optional Model Provider uuid for use with provider models
+
model_uuid: Identifier for the foundation model.
name: Agent name
@@ -973,12 +1001,14 @@ async def update(
body=await async_maybe_transform(
{
"agent_log_insights_enabled": agent_log_insights_enabled,
+ "allowed_domains": allowed_domains,
"anthropic_key_uuid": anthropic_key_uuid,
"conversation_logs_enabled": conversation_logs_enabled,
"description": description,
"instruction": instruction,
"k": k,
"max_tokens": max_tokens,
+ "model_provider_key_uuid": model_provider_key_uuid,
"model_uuid": model_uuid,
"name": name,
"openai_key_uuid": openai_key_uuid,
diff --git a/src/gradient/resources/agents/evaluation_metrics/__init__.py b/src/gradient/resources/agents/evaluation_metrics/__init__.py
index 31e2f93b..515a221b 100644
--- a/src/gradient/resources/agents/evaluation_metrics/__init__.py
+++ b/src/gradient/resources/agents/evaluation_metrics/__init__.py
@@ -48,6 +48,14 @@
EvaluationMetricsResourceWithStreamingResponse,
AsyncEvaluationMetricsResourceWithStreamingResponse,
)
+from .scheduled_indexing import (
+ ScheduledIndexingResource,
+ AsyncScheduledIndexingResource,
+ ScheduledIndexingResourceWithRawResponse,
+ AsyncScheduledIndexingResourceWithRawResponse,
+ ScheduledIndexingResourceWithStreamingResponse,
+ AsyncScheduledIndexingResourceWithStreamingResponse,
+)
__all__ = [
"WorkspacesResource",
@@ -80,6 +88,12 @@
"AsyncOauth2ResourceWithRawResponse",
"Oauth2ResourceWithStreamingResponse",
"AsyncOauth2ResourceWithStreamingResponse",
+ "ScheduledIndexingResource",
+ "AsyncScheduledIndexingResource",
+ "ScheduledIndexingResourceWithRawResponse",
+ "AsyncScheduledIndexingResourceWithRawResponse",
+ "ScheduledIndexingResourceWithStreamingResponse",
+ "AsyncScheduledIndexingResourceWithStreamingResponse",
"EvaluationMetricsResource",
"AsyncEvaluationMetricsResource",
"EvaluationMetricsResourceWithRawResponse",
diff --git a/src/gradient/resources/agents/evaluation_metrics/evaluation_metrics.py b/src/gradient/resources/agents/evaluation_metrics/evaluation_metrics.py
index 43c1aa9b..14ea4d55 100644
--- a/src/gradient/resources/agents/evaluation_metrics/evaluation_metrics.py
+++ b/src/gradient/resources/agents/evaluation_metrics/evaluation_metrics.py
@@ -40,6 +40,14 @@
)
from ...._base_client import make_request_options
from ....types.agents import evaluation_metric_list_regions_params
+from .scheduled_indexing import (
+ ScheduledIndexingResource,
+ AsyncScheduledIndexingResource,
+ ScheduledIndexingResourceWithRawResponse,
+ AsyncScheduledIndexingResourceWithRawResponse,
+ ScheduledIndexingResourceWithStreamingResponse,
+ AsyncScheduledIndexingResourceWithStreamingResponse,
+)
from .anthropic.anthropic import (
AnthropicResource,
AsyncAnthropicResource,
@@ -83,6 +91,10 @@ def openai(self) -> OpenAIResource:
def oauth2(self) -> Oauth2Resource:
return Oauth2Resource(self._client)
+ @cached_property
+ def scheduled_indexing(self) -> ScheduledIndexingResource:
+ return ScheduledIndexingResource(self._client)
+
@cached_property
def with_raw_response(self) -> EvaluationMetricsResourceWithRawResponse:
"""
@@ -196,6 +208,10 @@ def openai(self) -> AsyncOpenAIResource:
def oauth2(self) -> AsyncOauth2Resource:
return AsyncOauth2Resource(self._client)
+ @cached_property
+ def scheduled_indexing(self) -> AsyncScheduledIndexingResource:
+ return AsyncScheduledIndexingResource(self._client)
+
@cached_property
def with_raw_response(self) -> AsyncEvaluationMetricsResourceWithRawResponse:
"""
@@ -319,6 +335,10 @@ def openai(self) -> OpenAIResourceWithRawResponse:
def oauth2(self) -> Oauth2ResourceWithRawResponse:
return Oauth2ResourceWithRawResponse(self._evaluation_metrics.oauth2)
+ @cached_property
+ def scheduled_indexing(self) -> ScheduledIndexingResourceWithRawResponse:
+ return ScheduledIndexingResourceWithRawResponse(self._evaluation_metrics.scheduled_indexing)
+
class AsyncEvaluationMetricsResourceWithRawResponse:
def __init__(self, evaluation_metrics: AsyncEvaluationMetricsResource) -> None:
@@ -351,6 +371,10 @@ def openai(self) -> AsyncOpenAIResourceWithRawResponse:
def oauth2(self) -> AsyncOauth2ResourceWithRawResponse:
return AsyncOauth2ResourceWithRawResponse(self._evaluation_metrics.oauth2)
+ @cached_property
+ def scheduled_indexing(self) -> AsyncScheduledIndexingResourceWithRawResponse:
+ return AsyncScheduledIndexingResourceWithRawResponse(self._evaluation_metrics.scheduled_indexing)
+
class EvaluationMetricsResourceWithStreamingResponse:
def __init__(self, evaluation_metrics: EvaluationMetricsResource) -> None:
@@ -383,6 +407,10 @@ def openai(self) -> OpenAIResourceWithStreamingResponse:
def oauth2(self) -> Oauth2ResourceWithStreamingResponse:
return Oauth2ResourceWithStreamingResponse(self._evaluation_metrics.oauth2)
+ @cached_property
+ def scheduled_indexing(self) -> ScheduledIndexingResourceWithStreamingResponse:
+ return ScheduledIndexingResourceWithStreamingResponse(self._evaluation_metrics.scheduled_indexing)
+
class AsyncEvaluationMetricsResourceWithStreamingResponse:
def __init__(self, evaluation_metrics: AsyncEvaluationMetricsResource) -> None:
@@ -414,3 +442,7 @@ def openai(self) -> AsyncOpenAIResourceWithStreamingResponse:
@cached_property
def oauth2(self) -> AsyncOauth2ResourceWithStreamingResponse:
return AsyncOauth2ResourceWithStreamingResponse(self._evaluation_metrics.oauth2)
+
+ @cached_property
+ def scheduled_indexing(self) -> AsyncScheduledIndexingResourceWithStreamingResponse:
+ return AsyncScheduledIndexingResourceWithStreamingResponse(self._evaluation_metrics.scheduled_indexing)
diff --git a/src/gradient/resources/agents/evaluation_metrics/scheduled_indexing.py b/src/gradient/resources/agents/evaluation_metrics/scheduled_indexing.py
new file mode 100644
index 00000000..e346f7ae
--- /dev/null
+++ b/src/gradient/resources/agents/evaluation_metrics/scheduled_indexing.py
@@ -0,0 +1,377 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Iterable
+
+import httpx
+
+from ...._types import Body, Omit, Query, Headers, NotGiven, omit, not_given
+from ...._utils import maybe_transform, async_maybe_transform
+from ...._compat import cached_property
+from ...._resource import SyncAPIResource, AsyncAPIResource
+from ...._response import (
+ to_raw_response_wrapper,
+ to_streamed_response_wrapper,
+ async_to_raw_response_wrapper,
+ async_to_streamed_response_wrapper,
+)
+from ...._base_client import make_request_options
+from ....types.agents.evaluation_metrics import scheduled_indexing_create_params
+from ....types.agents.evaluation_metrics.scheduled_indexing_create_response import ScheduledIndexingCreateResponse
+from ....types.agents.evaluation_metrics.scheduled_indexing_delete_response import ScheduledIndexingDeleteResponse
+from ....types.agents.evaluation_metrics.scheduled_indexing_retrieve_response import ScheduledIndexingRetrieveResponse
+
+__all__ = ["ScheduledIndexingResource", "AsyncScheduledIndexingResource"]
+
+
+class ScheduledIndexingResource(SyncAPIResource):
+ @cached_property
+ def with_raw_response(self) -> ScheduledIndexingResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
+ """
+ return ScheduledIndexingResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> ScheduledIndexingResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
+ """
+ return ScheduledIndexingResourceWithStreamingResponse(self)
+
+ def create(
+ self,
+ *,
+ days: Iterable[int] | Omit = omit,
+ knowledge_base_uuid: str | Omit = omit,
+ time: str | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> ScheduledIndexingCreateResponse:
+ """
+ To create scheduled indexing for a knowledge base, send a POST request to
+ `/v2/gen-ai/scheduled-indexing`.
+
+ Args:
+ days: Days for execution (days are numbered as in a cron expression, e.g. Monday
+ is 1)
+
+ knowledge_base_uuid: Knowledge base uuid for which the schedule is created
+
+ time: Time of execution (HH:MM) UTC
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return self._post(
+ "/v2/gen-ai/scheduled-indexing"
+ if self._client._base_url_overridden
+ else "https://api.digitalocean.com/v2/gen-ai/scheduled-indexing",
+ body=maybe_transform(
+ {
+ "days": days,
+ "knowledge_base_uuid": knowledge_base_uuid,
+ "time": time,
+ },
+ scheduled_indexing_create_params.ScheduledIndexingCreateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=ScheduledIndexingCreateResponse,
+ )
+
+ def retrieve(
+ self,
+ knowledge_base_uuid: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> ScheduledIndexingRetrieveResponse:
+ """
+ To get scheduled indexing for a knowledge base using its knowledge base uuid, send
+ a GET request to `/v2/gen-ai/scheduled-indexing/knowledge-base/{knowledge_base_uuid}`.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not knowledge_base_uuid:
+ raise ValueError(
+ f"Expected a non-empty value for `knowledge_base_uuid` but received {knowledge_base_uuid!r}"
+ )
+ return self._get(
+ f"/v2/gen-ai/scheduled-indexing/knowledge-base/{knowledge_base_uuid}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/scheduled-indexing/knowledge-base/{knowledge_base_uuid}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=ScheduledIndexingRetrieveResponse,
+ )
+
+ def delete(
+ self,
+ uuid: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> ScheduledIndexingDeleteResponse:
+ """
+ To delete scheduled indexing for a knowledge base, send a DELETE request to
+ `/v2/gen-ai/scheduled-indexing/{uuid}`.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not uuid:
+ raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}")
+ return self._delete(
+ f"/v2/gen-ai/scheduled-indexing/{uuid}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/scheduled-indexing/{uuid}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=ScheduledIndexingDeleteResponse,
+ )
+
+
+class AsyncScheduledIndexingResource(AsyncAPIResource):
+ @cached_property
+ def with_raw_response(self) -> AsyncScheduledIndexingResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
+ """
+ return AsyncScheduledIndexingResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AsyncScheduledIndexingResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
+ """
+ return AsyncScheduledIndexingResourceWithStreamingResponse(self)
+
+ async def create(
+ self,
+ *,
+ days: Iterable[int] | Omit = omit,
+ knowledge_base_uuid: str | Omit = omit,
+ time: str | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> ScheduledIndexingCreateResponse:
+ """
+ To create scheduled indexing for a knowledge base, send a POST request to
+ `/v2/gen-ai/scheduled-indexing`.
+
+ Args:
+ days: Days for execution (days are numbered as in a cron expression, e.g. Monday
+ is 1)
+
+ knowledge_base_uuid: Knowledge base uuid for which the schedule is created
+
+ time: Time of execution (HH:MM) UTC
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return await self._post(
+ "/v2/gen-ai/scheduled-indexing"
+ if self._client._base_url_overridden
+ else "https://api.digitalocean.com/v2/gen-ai/scheduled-indexing",
+ body=await async_maybe_transform(
+ {
+ "days": days,
+ "knowledge_base_uuid": knowledge_base_uuid,
+ "time": time,
+ },
+ scheduled_indexing_create_params.ScheduledIndexingCreateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=ScheduledIndexingCreateResponse,
+ )
+
+ async def retrieve(
+ self,
+ knowledge_base_uuid: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> ScheduledIndexingRetrieveResponse:
+ """
+ To get scheduled indexing for a knowledge base using its knowledge base uuid, send
+ a GET request to `/v2/gen-ai/scheduled-indexing/knowledge-base/{knowledge_base_uuid}`.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not knowledge_base_uuid:
+ raise ValueError(
+ f"Expected a non-empty value for `knowledge_base_uuid` but received {knowledge_base_uuid!r}"
+ )
+ return await self._get(
+ f"/v2/gen-ai/scheduled-indexing/knowledge-base/{knowledge_base_uuid}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/scheduled-indexing/knowledge-base/{knowledge_base_uuid}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=ScheduledIndexingRetrieveResponse,
+ )
+
+ async def delete(
+ self,
+ uuid: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> ScheduledIndexingDeleteResponse:
+ """
+ To delete scheduled indexing for a knowledge base, send a DELETE request to
+ `/v2/gen-ai/scheduled-indexing/{uuid}`.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not uuid:
+ raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}")
+ return await self._delete(
+ f"/v2/gen-ai/scheduled-indexing/{uuid}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/scheduled-indexing/{uuid}",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=ScheduledIndexingDeleteResponse,
+ )
+
+
+class ScheduledIndexingResourceWithRawResponse:
+ def __init__(self, scheduled_indexing: ScheduledIndexingResource) -> None:
+ self._scheduled_indexing = scheduled_indexing
+
+ self.create = to_raw_response_wrapper(
+ scheduled_indexing.create,
+ )
+ self.retrieve = to_raw_response_wrapper(
+ scheduled_indexing.retrieve,
+ )
+ self.delete = to_raw_response_wrapper(
+ scheduled_indexing.delete,
+ )
+
+
+class AsyncScheduledIndexingResourceWithRawResponse:
+ def __init__(self, scheduled_indexing: AsyncScheduledIndexingResource) -> None:
+ self._scheduled_indexing = scheduled_indexing
+
+ self.create = async_to_raw_response_wrapper(
+ scheduled_indexing.create,
+ )
+ self.retrieve = async_to_raw_response_wrapper(
+ scheduled_indexing.retrieve,
+ )
+ self.delete = async_to_raw_response_wrapper(
+ scheduled_indexing.delete,
+ )
+
+
+class ScheduledIndexingResourceWithStreamingResponse:
+ def __init__(self, scheduled_indexing: ScheduledIndexingResource) -> None:
+ self._scheduled_indexing = scheduled_indexing
+
+ self.create = to_streamed_response_wrapper(
+ scheduled_indexing.create,
+ )
+ self.retrieve = to_streamed_response_wrapper(
+ scheduled_indexing.retrieve,
+ )
+ self.delete = to_streamed_response_wrapper(
+ scheduled_indexing.delete,
+ )
+
+
+class AsyncScheduledIndexingResourceWithStreamingResponse:
+ def __init__(self, scheduled_indexing: AsyncScheduledIndexingResource) -> None:
+ self._scheduled_indexing = scheduled_indexing
+
+ self.create = async_to_streamed_response_wrapper(
+ scheduled_indexing.create,
+ )
+ self.retrieve = async_to_streamed_response_wrapper(
+ scheduled_indexing.retrieve,
+ )
+ self.delete = async_to_streamed_response_wrapper(
+ scheduled_indexing.delete,
+ )
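
End-to-end usage of the new resource, with placeholder uuids; the parameter names and their semantics come straight from the docstrings above.

```python
from gradient import Gradient

client = Gradient()

# Re-index every Monday, Wednesday and Friday at 02:00 UTC.
schedule = client.agents.evaluation_metrics.scheduled_indexing.create(
    knowledge_base_uuid="kb-uuid",  # placeholder
    days=[1, 3, 5],                 # cron-style numbering: Monday is 1
    time="02:00",                   # HH:MM, UTC
)

# Look up the schedule by knowledge base, then remove it by its own uuid.
client.agents.evaluation_metrics.scheduled_indexing.retrieve("kb-uuid")
client.agents.evaluation_metrics.scheduled_indexing.delete("schedule-uuid")
```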
diff --git a/src/gradient/resources/knowledge_bases/indexing_jobs.py b/src/gradient/resources/knowledge_bases/indexing_jobs.py
index 647ab308..7936c73c 100644
--- a/src/gradient/resources/knowledge_bases/indexing_jobs.py
+++ b/src/gradient/resources/knowledge_bases/indexing_jobs.py
@@ -28,6 +28,7 @@
from ...types.knowledge_bases.indexing_job_create_response import IndexingJobCreateResponse
from ...types.knowledge_bases.indexing_job_retrieve_response import IndexingJobRetrieveResponse
from ...types.knowledge_bases.indexing_job_update_cancel_response import IndexingJobUpdateCancelResponse
+from ...types.knowledge_bases.indexing_job_retrieve_signed_url_response import IndexingJobRetrieveSignedURLResponse
from ...types.knowledge_bases.indexing_job_retrieve_data_sources_response import IndexingJobRetrieveDataSourcesResponse
__all__ = ["IndexingJobsResource", "AsyncIndexingJobsResource"]
@@ -221,6 +222,42 @@ def retrieve_data_sources(
cast_to=IndexingJobRetrieveDataSourcesResponse,
)
+ def retrieve_signed_url(
+ self,
+ indexing_job_uuid: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> IndexingJobRetrieveSignedURLResponse:
+ """
+ To get a signed URL for indexing job details, send a GET request to
+ `/v2/gen-ai/indexing_jobs/{uuid}/details_signed_url`.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not indexing_job_uuid:
+ raise ValueError(f"Expected a non-empty value for `indexing_job_uuid` but received {indexing_job_uuid!r}")
+ return self._get(
+ f"/v2/gen-ai/indexing_jobs/{indexing_job_uuid}/details_signed_url"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/indexing_jobs/{indexing_job_uuid}/details_signed_url",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=IndexingJobRetrieveSignedURLResponse,
+ )
+
def update_cancel(
self,
path_uuid: str,
@@ -357,8 +394,7 @@ def wait_for_completion(
elapsed = time.time() - start_time
if elapsed >= timeout:
raise IndexingJobTimeoutError(
- f"Indexing job {uuid} did not complete within {timeout} seconds. "
- f"Current phase: {phase}",
+ f"Indexing job {uuid} did not complete within {timeout} seconds. Current phase: {phase}",
uuid=uuid,
phase=phase,
timeout=timeout,
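The reflowed message above is what `IndexingJobTimeoutError` surfaces, and the constructor call shows it also carries `uuid`, `phase`, and `timeout` as attributes. A sketch of catching it around `wait_for_completion` (the import location and the exact call signature are assumptions, not confirmed by this diff):

```python
# Hedged sketch: the exception's import path is assumed.
from gradient import Gradient
from gradient.resources.knowledge_bases.indexing_jobs import IndexingJobTimeoutError

client = Gradient()

try:
    job = client.knowledge_bases.indexing_jobs.wait_for_completion(
        "job-uuid",     # hypothetical indexing job UUID
        timeout=300.0,  # seconds before IndexingJobTimeoutError is raised
    )
except IndexingJobTimeoutError as err:
    # The exception exposes the polling state at the moment of timeout.
    print(f"Job {err.uuid} still in phase {err.phase} after {err.timeout}s")
```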
@@ -556,6 +592,42 @@ async def retrieve_data_sources(
cast_to=IndexingJobRetrieveDataSourcesResponse,
)
+ async def retrieve_signed_url(
+ self,
+ indexing_job_uuid: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> IndexingJobRetrieveSignedURLResponse:
+ """
+ To get a signed URL for indexing job details, send a GET request to
+ `/v2/gen-ai/indexing_jobs/{uuid}/details_signed_url`.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not indexing_job_uuid:
+ raise ValueError(f"Expected a non-empty value for `indexing_job_uuid` but received {indexing_job_uuid!r}")
+ return await self._get(
+ f"/v2/gen-ai/indexing_jobs/{indexing_job_uuid}/details_signed_url"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/indexing_jobs/{indexing_job_uuid}/details_signed_url",
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=IndexingJobRetrieveSignedURLResponse,
+ )
+
async def update_cancel(
self,
path_uuid: str,
@@ -692,8 +764,7 @@ async def wait_for_completion(
elapsed = time.time() - start_time
if elapsed >= timeout:
raise IndexingJobTimeoutError(
- f"Indexing job {uuid} did not complete within {timeout} seconds. "
- f"Current phase: {phase}",
+ f"Indexing job {uuid} did not complete within {timeout} seconds. Current phase: {phase}",
uuid=uuid,
phase=phase,
timeout=timeout,
@@ -719,6 +790,9 @@ def __init__(self, indexing_jobs: IndexingJobsResource) -> None:
self.retrieve_data_sources = to_raw_response_wrapper(
indexing_jobs.retrieve_data_sources,
)
+ self.retrieve_signed_url = to_raw_response_wrapper(
+ indexing_jobs.retrieve_signed_url,
+ )
self.update_cancel = to_raw_response_wrapper(
indexing_jobs.update_cancel,
)
@@ -743,6 +817,9 @@ def __init__(self, indexing_jobs: AsyncIndexingJobsResource) -> None:
self.retrieve_data_sources = async_to_raw_response_wrapper(
indexing_jobs.retrieve_data_sources,
)
+ self.retrieve_signed_url = async_to_raw_response_wrapper(
+ indexing_jobs.retrieve_signed_url,
+ )
self.update_cancel = async_to_raw_response_wrapper(
indexing_jobs.update_cancel,
)
@@ -767,6 +844,9 @@ def __init__(self, indexing_jobs: IndexingJobsResource) -> None:
self.retrieve_data_sources = to_streamed_response_wrapper(
indexing_jobs.retrieve_data_sources,
)
+ self.retrieve_signed_url = to_streamed_response_wrapper(
+ indexing_jobs.retrieve_signed_url,
+ )
self.update_cancel = to_streamed_response_wrapper(
indexing_jobs.update_cancel,
)
@@ -791,6 +871,9 @@ def __init__(self, indexing_jobs: AsyncIndexingJobsResource) -> None:
self.retrieve_data_sources = async_to_streamed_response_wrapper(
indexing_jobs.retrieve_data_sources,
)
+ self.retrieve_signed_url = async_to_streamed_response_wrapper(
+ indexing_jobs.retrieve_signed_url,
+ )
self.update_cancel = async_to_streamed_response_wrapper(
indexing_jobs.update_cancel,
)
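Both the sync and async resources now expose `retrieve_signed_url`, and the wrapper classes above register it for raw and streaming access. A minimal sketch of the two call styles (the `AsyncGradient` client name follows the SDK's usual sync/async pairing and is an assumption here):

```python
import asyncio

from gradient import Gradient, AsyncGradient  # AsyncGradient name assumed

client = Gradient()

# GET /v2/gen-ai/indexing_jobs/{uuid}/details_signed_url
signed = client.knowledge_bases.indexing_jobs.retrieve_signed_url(
    "12345678-1234-1234-1234-123456789012",  # hypothetical job UUID
)

async def main() -> None:
    async_client = AsyncGradient()
    await async_client.knowledge_bases.indexing_jobs.retrieve_signed_url(
        "12345678-1234-1234-1234-123456789012",
    )

asyncio.run(main())
```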
diff --git a/src/gradient/resources/knowledge_bases/knowledge_bases.py b/src/gradient/resources/knowledge_bases/knowledge_bases.py
index 4325148c..2378f7f4 100644
--- a/src/gradient/resources/knowledge_bases/knowledge_bases.py
+++ b/src/gradient/resources/knowledge_bases/knowledge_bases.py
@@ -8,8 +8,21 @@
import httpx
-from ...types import knowledge_base_list_params, knowledge_base_create_params, knowledge_base_update_params
-from ..._types import Body, Omit, Query, Headers, NotGiven, SequenceNotStr, omit, not_given
+from ...types import (
+ knowledge_base_list_params,
+ knowledge_base_create_params,
+ knowledge_base_update_params,
+)
+from ..._types import (
+ Body,
+ Omit,
+ Query,
+ Headers,
+ NotGiven,
+ SequenceNotStr,
+ omit,
+ not_given,
+)
from ..._utils import maybe_transform, async_maybe_transform
from ..._compat import cached_property
from ..._resource import SyncAPIResource, AsyncAPIResource
@@ -41,6 +54,9 @@
from ...types.knowledge_base_delete_response import KnowledgeBaseDeleteResponse
from ...types.knowledge_base_update_response import KnowledgeBaseUpdateResponse
from ...types.knowledge_base_retrieve_response import KnowledgeBaseRetrieveResponse
+from ...types.knowledge_base_list_indexing_jobs_response import (
+ KnowledgeBaseListIndexingJobsResponse,
+)
__all__ = [
"KnowledgeBasesResource",
@@ -142,9 +158,11 @@ def create(
timeout: Override the client-level default timeout for this request, in seconds
"""
return self._post(
- "/v2/gen-ai/knowledge_bases"
- if self._client._base_url_overridden
- else "https://api.digitalocean.com/v2/gen-ai/knowledge_bases",
+ (
+ "/v2/gen-ai/knowledge_bases"
+ if self._client._base_url_overridden
+ else "https://api.digitalocean.com/v2/gen-ai/knowledge_bases"
+ ),
body=maybe_transform(
{
"database_id": database_id,
@@ -159,7 +177,10 @@ def create(
knowledge_base_create_params.KnowledgeBaseCreateParams,
),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
),
cast_to=KnowledgeBaseCreateResponse,
)
@@ -189,13 +210,20 @@ def retrieve(
timeout: Override the client-level default timeout for this request, in seconds
"""
if not uuid:
- raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}")
+ raise ValueError(
+ f"Expected a non-empty value for `uuid` but received {uuid!r}"
+ )
return self._get(
- f"/v2/gen-ai/knowledge_bases/{uuid}"
- if self._client._base_url_overridden
- else f"https://api.digitalocean.com/v2/gen-ai/knowledge_bases/{uuid}",
+ (
+ f"/v2/gen-ai/knowledge_bases/{uuid}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/knowledge_bases/{uuid}"
+ ),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
),
cast_to=KnowledgeBaseRetrieveResponse,
)
@@ -243,11 +271,15 @@ def update(
timeout: Override the client-level default timeout for this request, in seconds
"""
if not path_uuid:
- raise ValueError(f"Expected a non-empty value for `path_uuid` but received {path_uuid!r}")
+ raise ValueError(
+ f"Expected a non-empty value for `path_uuid` but received {path_uuid!r}"
+ )
return self._put(
- f"/v2/gen-ai/knowledge_bases/{path_uuid}"
- if self._client._base_url_overridden
- else f"https://api.digitalocean.com/v2/gen-ai/knowledge_bases/{path_uuid}",
+ (
+ f"/v2/gen-ai/knowledge_bases/{path_uuid}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/knowledge_bases/{path_uuid}"
+ ),
body=maybe_transform(
{
"database_id": database_id,
@@ -260,7 +292,10 @@ def update(
knowledge_base_update_params.KnowledgeBaseUpdateParams,
),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
),
cast_to=KnowledgeBaseUpdateResponse,
)
@@ -294,9 +329,11 @@ def list(
timeout: Override the client-level default timeout for this request, in seconds
"""
return self._get(
- "/v2/gen-ai/knowledge_bases"
- if self._client._base_url_overridden
- else "https://api.digitalocean.com/v2/gen-ai/knowledge_bases",
+ (
+ "/v2/gen-ai/knowledge_bases"
+ if self._client._base_url_overridden
+ else "https://api.digitalocean.com/v2/gen-ai/knowledge_bases"
+ ),
options=make_request_options(
extra_headers=extra_headers,
extra_query=extra_query,
@@ -338,13 +375,20 @@ def delete(
timeout: Override the client-level default timeout for this request, in seconds
"""
if not uuid:
- raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}")
+ raise ValueError(
+ f"Expected a non-empty value for `uuid` but received {uuid!r}"
+ )
return self._delete(
- f"/v2/gen-ai/knowledge_bases/{uuid}"
- if self._client._base_url_overridden
- else f"https://api.digitalocean.com/v2/gen-ai/knowledge_bases/{uuid}",
+ (
+ f"/v2/gen-ai/knowledge_bases/{uuid}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/knowledge_bases/{uuid}"
+ ),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
),
cast_to=KnowledgeBaseDeleteResponse,
)
@@ -390,7 +434,9 @@ def wait_for_database(
KnowledgeBaseTimeoutError: If the timeout is exceeded before the database becomes ONLINE
"""
if not uuid:
- raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}")
+ raise ValueError(
+ f"Expected a non-empty value for `uuid` but received {uuid!r}"
+ )
start_time = time.time()
failed_states = {"DECOMMISSIONED", "UNHEALTHY"}
@@ -416,7 +462,9 @@ def wait_for_database(
return response
if status in failed_states:
- raise KnowledgeBaseDatabaseError(f"Knowledge base database entered failed state: {status}")
+ raise KnowledgeBaseDatabaseError(
+ f"Knowledge base database entered failed state: {status}"
+ )
# Sleep before next poll, but don't exceed timeout
remaining_time = timeout - elapsed
@@ -424,6 +472,49 @@ def wait_for_database(
if sleep_time > 0:
time.sleep(sleep_time)
+ def list_indexing_jobs(
+ self,
+ knowledge_base_uuid: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> KnowledgeBaseListIndexingJobsResponse:
+ """
+ To list the latest 15 indexing jobs for a knowledge base, send a GET request to
+ `/v2/gen-ai/knowledge_bases/{knowledge_base_uuid}/indexing_jobs`.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not knowledge_base_uuid:
+ raise ValueError(
+ f"Expected a non-empty value for `knowledge_base_uuid` but received {knowledge_base_uuid!r}"
+ )
+ return self._get(
+ (
+ f"/v2/gen-ai/knowledge_bases/{knowledge_base_uuid}/indexing_jobs"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/knowledge_bases/{knowledge_base_uuid}/indexing_jobs"
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ ),
+ cast_to=KnowledgeBaseListIndexingJobsResponse,
+ )
+
class AsyncKnowledgeBasesResource(AsyncAPIResource):
@cached_property
@@ -445,7 +536,9 @@ def with_raw_response(self) -> AsyncKnowledgeBasesResourceWithRawResponse:
return AsyncKnowledgeBasesResourceWithRawResponse(self)
@cached_property
- def with_streaming_response(self) -> AsyncKnowledgeBasesResourceWithStreamingResponse:
+ def with_streaming_response(
+ self,
+ ) -> AsyncKnowledgeBasesResourceWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
@@ -505,9 +598,11 @@ async def create(
timeout: Override the client-level default timeout for this request, in seconds
"""
return await self._post(
- "/v2/gen-ai/knowledge_bases"
- if self._client._base_url_overridden
- else "https://api.digitalocean.com/v2/gen-ai/knowledge_bases",
+ (
+ "/v2/gen-ai/knowledge_bases"
+ if self._client._base_url_overridden
+ else "https://api.digitalocean.com/v2/gen-ai/knowledge_bases"
+ ),
body=await async_maybe_transform(
{
"database_id": database_id,
@@ -522,7 +617,10 @@ async def create(
knowledge_base_create_params.KnowledgeBaseCreateParams,
),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
),
cast_to=KnowledgeBaseCreateResponse,
)
@@ -552,13 +650,20 @@ async def retrieve(
timeout: Override the client-level default timeout for this request, in seconds
"""
if not uuid:
- raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}")
+ raise ValueError(
+ f"Expected a non-empty value for `uuid` but received {uuid!r}"
+ )
return await self._get(
- f"/v2/gen-ai/knowledge_bases/{uuid}"
- if self._client._base_url_overridden
- else f"https://api.digitalocean.com/v2/gen-ai/knowledge_bases/{uuid}",
+ (
+ f"/v2/gen-ai/knowledge_bases/{uuid}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/knowledge_bases/{uuid}"
+ ),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
),
cast_to=KnowledgeBaseRetrieveResponse,
)
@@ -606,11 +711,15 @@ async def update(
timeout: Override the client-level default timeout for this request, in seconds
"""
if not path_uuid:
- raise ValueError(f"Expected a non-empty value for `path_uuid` but received {path_uuid!r}")
+ raise ValueError(
+ f"Expected a non-empty value for `path_uuid` but received {path_uuid!r}"
+ )
return await self._put(
- f"/v2/gen-ai/knowledge_bases/{path_uuid}"
- if self._client._base_url_overridden
- else f"https://api.digitalocean.com/v2/gen-ai/knowledge_bases/{path_uuid}",
+ (
+ f"/v2/gen-ai/knowledge_bases/{path_uuid}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/knowledge_bases/{path_uuid}"
+ ),
body=await async_maybe_transform(
{
"database_id": database_id,
@@ -623,7 +732,10 @@ async def update(
knowledge_base_update_params.KnowledgeBaseUpdateParams,
),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
),
cast_to=KnowledgeBaseUpdateResponse,
)
@@ -657,9 +769,11 @@ async def list(
timeout: Override the client-level default timeout for this request, in seconds
"""
return await self._get(
- "/v2/gen-ai/knowledge_bases"
- if self._client._base_url_overridden
- else "https://api.digitalocean.com/v2/gen-ai/knowledge_bases",
+ (
+ "/v2/gen-ai/knowledge_bases"
+ if self._client._base_url_overridden
+ else "https://api.digitalocean.com/v2/gen-ai/knowledge_bases"
+ ),
options=make_request_options(
extra_headers=extra_headers,
extra_query=extra_query,
@@ -701,13 +815,20 @@ async def delete(
timeout: Override the client-level default timeout for this request, in seconds
"""
if not uuid:
- raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}")
+ raise ValueError(
+ f"Expected a non-empty value for `uuid` but received {uuid!r}"
+ )
return await self._delete(
- f"/v2/gen-ai/knowledge_bases/{uuid}"
- if self._client._base_url_overridden
- else f"https://api.digitalocean.com/v2/gen-ai/knowledge_bases/{uuid}",
+ (
+ f"/v2/gen-ai/knowledge_bases/{uuid}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/knowledge_bases/{uuid}"
+ ),
options=make_request_options(
- extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
),
cast_to=KnowledgeBaseDeleteResponse,
)
@@ -753,7 +874,9 @@ async def wait_for_database(
KnowledgeBaseTimeoutError: If the timeout is exceeded before the database becomes ONLINE
"""
if not uuid:
- raise ValueError(f"Expected a non-empty value for `uuid` but received {uuid!r}")
+ raise ValueError(
+ f"Expected a non-empty value for `uuid` but received {uuid!r}"
+ )
start_time = time.time()
failed_states = {"DECOMMISSIONED", "UNHEALTHY"}
@@ -779,7 +902,9 @@ async def wait_for_database(
return response
if status in failed_states:
- raise KnowledgeBaseDatabaseError(f"Knowledge base database entered failed state: {status}")
+ raise KnowledgeBaseDatabaseError(
+ f"Knowledge base database entered failed state: {status}"
+ )
# Sleep before next poll, but don't exceed timeout
remaining_time = timeout - elapsed
@@ -787,6 +912,49 @@ async def wait_for_database(
if sleep_time > 0:
await asyncio.sleep(sleep_time)
+ async def list_indexing_jobs(
+ self,
+ knowledge_base_uuid: str,
+ *,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> KnowledgeBaseListIndexingJobsResponse:
+ """
+ To list the latest 15 indexing jobs for a knowledge base, send a GET request to
+ `/v2/gen-ai/knowledge_bases/{knowledge_base_uuid}/indexing_jobs`.
+
+ Args:
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not knowledge_base_uuid:
+ raise ValueError(
+ f"Expected a non-empty value for `knowledge_base_uuid` but received {knowledge_base_uuid!r}"
+ )
+ return await self._get(
+ (
+ f"/v2/gen-ai/knowledge_bases/{knowledge_base_uuid}/indexing_jobs"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/gen-ai/knowledge_bases/{knowledge_base_uuid}/indexing_jobs"
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ ),
+ cast_to=KnowledgeBaseListIndexingJobsResponse,
+ )
+
class KnowledgeBasesResourceWithRawResponse:
def __init__(self, knowledge_bases: KnowledgeBasesResource) -> None:
@@ -810,6 +978,9 @@ def __init__(self, knowledge_bases: KnowledgeBasesResource) -> None:
self.wait_for_database = to_raw_response_wrapper(
knowledge_bases.wait_for_database,
)
+ self.list_indexing_jobs = to_raw_response_wrapper(
+ knowledge_bases.list_indexing_jobs,
+ )
@cached_property
def data_sources(self) -> DataSourcesResourceWithRawResponse:
@@ -842,14 +1013,21 @@ def __init__(self, knowledge_bases: AsyncKnowledgeBasesResource) -> None:
self.wait_for_database = async_to_raw_response_wrapper(
knowledge_bases.wait_for_database,
)
+ self.list_indexing_jobs = async_to_raw_response_wrapper(
+ knowledge_bases.list_indexing_jobs,
+ )
@cached_property
def data_sources(self) -> AsyncDataSourcesResourceWithRawResponse:
- return AsyncDataSourcesResourceWithRawResponse(self._knowledge_bases.data_sources)
+ return AsyncDataSourcesResourceWithRawResponse(
+ self._knowledge_bases.data_sources
+ )
@cached_property
def indexing_jobs(self) -> AsyncIndexingJobsResourceWithRawResponse:
- return AsyncIndexingJobsResourceWithRawResponse(self._knowledge_bases.indexing_jobs)
+ return AsyncIndexingJobsResourceWithRawResponse(
+ self._knowledge_bases.indexing_jobs
+ )
class KnowledgeBasesResourceWithStreamingResponse:
@@ -874,14 +1052,21 @@ def __init__(self, knowledge_bases: KnowledgeBasesResource) -> None:
self.wait_for_database = to_streamed_response_wrapper(
knowledge_bases.wait_for_database,
)
+ self.list_indexing_jobs = to_streamed_response_wrapper(
+ knowledge_bases.list_indexing_jobs,
+ )
@cached_property
def data_sources(self) -> DataSourcesResourceWithStreamingResponse:
- return DataSourcesResourceWithStreamingResponse(self._knowledge_bases.data_sources)
+ return DataSourcesResourceWithStreamingResponse(
+ self._knowledge_bases.data_sources
+ )
@cached_property
def indexing_jobs(self) -> IndexingJobsResourceWithStreamingResponse:
- return IndexingJobsResourceWithStreamingResponse(self._knowledge_bases.indexing_jobs)
+ return IndexingJobsResourceWithStreamingResponse(
+ self._knowledge_bases.indexing_jobs
+ )
class AsyncKnowledgeBasesResourceWithStreamingResponse:
@@ -906,11 +1091,18 @@ def __init__(self, knowledge_bases: AsyncKnowledgeBasesResource) -> None:
self.wait_for_database = async_to_streamed_response_wrapper(
knowledge_bases.wait_for_database,
)
+ self.list_indexing_jobs = async_to_streamed_response_wrapper(
+ knowledge_bases.list_indexing_jobs,
+ )
@cached_property
def data_sources(self) -> AsyncDataSourcesResourceWithStreamingResponse:
- return AsyncDataSourcesResourceWithStreamingResponse(self._knowledge_bases.data_sources)
+ return AsyncDataSourcesResourceWithStreamingResponse(
+ self._knowledge_bases.data_sources
+ )
@cached_property
def indexing_jobs(self) -> AsyncIndexingJobsResourceWithStreamingResponse:
- return AsyncIndexingJobsResourceWithStreamingResponse(self._knowledge_bases.indexing_jobs)
+ return AsyncIndexingJobsResourceWithStreamingResponse(
+ self._knowledge_bases.indexing_jobs
+ )
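With the wrappers above in place, `list_indexing_jobs` is available in plain, raw, and streaming forms on both clients. A short sketch of the plain and raw variants (the UUID and header name are illustrative):

```python
from gradient import Gradient

client = Gradient()

# GET /v2/gen-ai/knowledge_bases/{knowledge_base_uuid}/indexing_jobs
jobs = client.knowledge_bases.list_indexing_jobs("kb-uuid")  # hypothetical UUID

# Raw access via the wrapper registered above:
raw = client.knowledge_bases.with_raw_response.list_indexing_jobs("kb-uuid")
print(raw.headers.get("ratelimit-remaining"))  # illustrative header
print(raw.parse())  # -> KnowledgeBaseListIndexingJobsResponse
```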
diff --git a/src/gradient/resources/nfs/__init__.py b/src/gradient/resources/nfs/__init__.py
new file mode 100644
index 00000000..28f843c0
--- /dev/null
+++ b/src/gradient/resources/nfs/__init__.py
@@ -0,0 +1,33 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from .nfs import (
+ NfsResource,
+ AsyncNfsResource,
+ NfsResourceWithRawResponse,
+ AsyncNfsResourceWithRawResponse,
+ NfsResourceWithStreamingResponse,
+ AsyncNfsResourceWithStreamingResponse,
+)
+from .snapshots import (
+ SnapshotsResource,
+ AsyncSnapshotsResource,
+ SnapshotsResourceWithRawResponse,
+ AsyncSnapshotsResourceWithRawResponse,
+ SnapshotsResourceWithStreamingResponse,
+ AsyncSnapshotsResourceWithStreamingResponse,
+)
+
+__all__ = [
+ "SnapshotsResource",
+ "AsyncSnapshotsResource",
+ "SnapshotsResourceWithRawResponse",
+ "AsyncSnapshotsResourceWithRawResponse",
+ "SnapshotsResourceWithStreamingResponse",
+ "AsyncSnapshotsResourceWithStreamingResponse",
+ "NfsResource",
+ "AsyncNfsResource",
+ "NfsResourceWithRawResponse",
+ "AsyncNfsResourceWithRawResponse",
+ "NfsResourceWithStreamingResponse",
+ "AsyncNfsResourceWithStreamingResponse",
+]
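The re-exports above make the snapshot resources importable alongside the share-level ones. A sketch of the snapshot surface, assuming the resource mounts at `client.nfs` (the mount point is not shown in this diff):

```python
from gradient import Gradient

client = Gradient()

# GET /v2/nfs/snapshots?region=...&share_id=...
snapshots = client.nfs.snapshots.list(
    region="atl1",
    share_id="share-uuid",  # optional filter; hypothetical share ID
)
```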
diff --git a/src/gradient/resources/nfs/nfs.py b/src/gradient/resources/nfs/nfs.py
new file mode 100644
index 00000000..1510bb69
--- /dev/null
+++ b/src/gradient/resources/nfs/nfs.py
@@ -0,0 +1,780 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, overload
+
+import httpx
+
+from ...types import nf_list_params, nf_create_params, nf_delete_params, nf_retrieve_params, nf_initiate_action_params
+from ..._types import Body, Omit, Query, Headers, NoneType, NotGiven, SequenceNotStr, omit, not_given
+from ..._utils import required_args, maybe_transform, async_maybe_transform
+from ..._compat import cached_property
+from .snapshots import (
+ SnapshotsResource,
+ AsyncSnapshotsResource,
+ SnapshotsResourceWithRawResponse,
+ AsyncSnapshotsResourceWithRawResponse,
+ SnapshotsResourceWithStreamingResponse,
+ AsyncSnapshotsResourceWithStreamingResponse,
+)
+from ..._resource import SyncAPIResource, AsyncAPIResource
+from ..._response import (
+ to_raw_response_wrapper,
+ to_streamed_response_wrapper,
+ async_to_raw_response_wrapper,
+ async_to_streamed_response_wrapper,
+)
+from ..._base_client import make_request_options
+from ...types.nf_list_response import NfListResponse
+from ...types.nf_create_response import NfCreateResponse
+from ...types.nf_retrieve_response import NfRetrieveResponse
+from ...types.nf_initiate_action_response import NfInitiateActionResponse
+
+__all__ = ["NfsResource", "AsyncNfsResource"]
+
+
+class NfsResource(SyncAPIResource):
+ @cached_property
+ def snapshots(self) -> SnapshotsResource:
+ return SnapshotsResource(self._client)
+
+ @cached_property
+ def with_raw_response(self) -> NfsResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
+ """
+ return NfsResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> NfsResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
+ """
+ return NfsResourceWithStreamingResponse(self)
+
+ def create(
+ self,
+ *,
+ name: str,
+ region: str,
+ size_gib: int,
+ vpc_ids: SequenceNotStr[str],
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> NfCreateResponse:
+ """
+ To create a new NFS share, send a POST request to `/v2/nfs`.
+
+ Args:
+ name: The human-readable name of the share.
+
+ region: The DigitalOcean region slug (e.g., nyc2, atl1) where the NFS share resides.
+
+ size_gib: The desired/provisioned size of the share in GiB (Gibibytes). Must be >= 50.
+
+ vpc_ids: List of VPC IDs that should be able to access the share.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return self._post(
+ "/v2/nfs" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/nfs",
+ body=maybe_transform(
+ {
+ "name": name,
+ "region": region,
+ "size_gib": size_gib,
+ "vpc_ids": vpc_ids,
+ },
+ nf_create_params.NfCreateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=NfCreateResponse,
+ )
+
+ def retrieve(
+ self,
+ nfs_id: str,
+ *,
+ region: str,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> NfRetrieveResponse:
+ """
+ To get an NFS share, send a GET request to `/v2/nfs/{nfs_id}?region=${region}`.
+
+ A successful request will return the NFS share.
+
+ Args:
+ region: The DigitalOcean region slug (e.g., nyc2, atl1) where the NFS share resides.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not nfs_id:
+ raise ValueError(f"Expected a non-empty value for `nfs_id` but received {nfs_id!r}")
+ return self._get(
+ f"/v2/nfs/{nfs_id}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/nfs/{nfs_id}",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform({"region": region}, nf_retrieve_params.NfRetrieveParams),
+ ),
+ cast_to=NfRetrieveResponse,
+ )
+
+ def list(
+ self,
+ *,
+ region: str,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> NfListResponse:
+ """
+ To list NFS shares, send a GET request to `/v2/nfs?region=${region}`.
+
+ A successful request will return all NFS shares belonging to the authenticated
+ user.
+
+ Args:
+ region: The DigitalOcean region slug (e.g., nyc2, atl1) where the NFS share resides.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return self._get(
+ "/v2/nfs" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/nfs",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform({"region": region}, nf_list_params.NfListParams),
+ ),
+ cast_to=NfListResponse,
+ )
+
+ def delete(
+ self,
+ nfs_id: str,
+ *,
+ region: str,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> None:
+ """
+ To delete an NFS share, send a DELETE request to
+ `/v2/nfs/{nfs_id}?region=${region}`.
+
+ A successful request will return a `204 No Content` status code.
+
+ Args:
+ region: The DigitalOcean region slug (e.g., nyc2, atl1) where the NFS share resides.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not nfs_id:
+ raise ValueError(f"Expected a non-empty value for `nfs_id` but received {nfs_id!r}")
+ extra_headers = {"Accept": "*/*", **(extra_headers or {})}
+ return self._delete(
+ f"/v2/nfs/{nfs_id}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/nfs/{nfs_id}",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform({"region": region}, nf_delete_params.NfDeleteParams),
+ ),
+ cast_to=NoneType,
+ )
+
+ @overload
+ def initiate_action(
+ self,
+ nfs_id: str,
+ *,
+ region: str,
+ type: Literal["resize", "snapshot"],
+ params: nf_initiate_action_params.NfsActionResizeParams | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> NfInitiateActionResponse:
+ """
+ To execute an action (such as resize) on a specified NFS share, send a POST
+ request to `/v2/nfs/{nfs_id}/actions`. In the JSON body of the request, set the
+ `type` attribute to one of the supported action types:
+
+ | Action | Details |
+ | ----------------------- | -------------------------------------------------------------------------- |
+ | `resize` | Resizes an NFS share. Set the `size_gib` attribute to the desired value in GiB |
+ | `snapshot` | Takes a snapshot of an NFS share |
+
+ Args:
+ region: The DigitalOcean region slug (e.g., atl1, nyc2) where the NFS share resides.
+
+ type: The type of action to initiate for the NFS share (such as resize or snapshot).
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ ...
+
+ @overload
+ def initiate_action(
+ self,
+ nfs_id: str,
+ *,
+ region: str,
+ type: Literal["resize", "snapshot"],
+ params: nf_initiate_action_params.NfsActionSnapshotParams | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> NfInitiateActionResponse:
+ """
+ To execute an action (such as resize) on a specified NFS share, send a POST
+ request to `/v2/nfs/{nfs_id}/actions`. In the JSON body of the request, set the
+ `type` attribute to one of the supported action types:
+
+ | Action | Details |
+ | ----------------------- | -------------------------------------------------------------------------- |
+ | `resize` | Resizes an NFS share. Set the `size_gib` attribute to the desired value in GiB |
+ | `snapshot` | Takes a snapshot of an NFS share |
+
+ Args:
+ region: The DigitalOcean region slug (e.g., atl1, nyc2) where the NFS share resides.
+
+ type: The type of action to initiate for the NFS share (such as resize or snapshot).
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ ...
+
+ @required_args(["region", "type"])
+ def initiate_action(
+ self,
+ nfs_id: str,
+ *,
+ region: str,
+ type: Literal["resize", "snapshot"],
+ params: nf_initiate_action_params.NfsActionResizeParams
+ | nf_initiate_action_params.NfsActionSnapshotParams
+ | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> NfInitiateActionResponse:
+ if not nfs_id:
+ raise ValueError(f"Expected a non-empty value for `nfs_id` but received {nfs_id!r}")
+ return self._post(
+ f"/v2/nfs/{nfs_id}/actions"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/nfs/{nfs_id}/actions",
+ body=maybe_transform(
+ {
+ "region": region,
+ "type": type,
+ "params": params,
+ },
+ nf_initiate_action_params.NfInitiateActionParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=NfInitiateActionResponse,
+ )
+
+
+class AsyncNfsResource(AsyncAPIResource):
+ @cached_property
+ def snapshots(self) -> AsyncSnapshotsResource:
+ return AsyncSnapshotsResource(self._client)
+
+ @cached_property
+ def with_raw_response(self) -> AsyncNfsResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
+ """
+ return AsyncNfsResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AsyncNfsResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
+ """
+ return AsyncNfsResourceWithStreamingResponse(self)
+
+ async def create(
+ self,
+ *,
+ name: str,
+ region: str,
+ size_gib: int,
+ vpc_ids: SequenceNotStr[str],
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> NfCreateResponse:
+ """
+ To create a new NFS share, send a POST request to `/v2/nfs`.
+
+ Args:
+ name: The human-readable name of the share.
+
+ region: The DigitalOcean region slug (e.g., nyc2, atl1) where the NFS share resides.
+
+ size_gib: The desired/provisioned size of the share in GiB (Gibibytes). Must be >= 50.
+
+ vpc_ids: List of VPC IDs that should be able to access the share.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return await self._post(
+ "/v2/nfs" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/nfs",
+ body=await async_maybe_transform(
+ {
+ "name": name,
+ "region": region,
+ "size_gib": size_gib,
+ "vpc_ids": vpc_ids,
+ },
+ nf_create_params.NfCreateParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=NfCreateResponse,
+ )
+
+ async def retrieve(
+ self,
+ nfs_id: str,
+ *,
+ region: str,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> NfRetrieveResponse:
+ """
+ To get an NFS share, send a GET request to `/v2/nfs/{nfs_id}?region=${region}`.
+
+ A successful request will return the NFS share.
+
+ Args:
+ region: The DigitalOcean region slug (e.g., nyc2, atl1) where the NFS share resides.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not nfs_id:
+ raise ValueError(f"Expected a non-empty value for `nfs_id` but received {nfs_id!r}")
+ return await self._get(
+ f"/v2/nfs/{nfs_id}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/nfs/{nfs_id}",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=await async_maybe_transform({"region": region}, nf_retrieve_params.NfRetrieveParams),
+ ),
+ cast_to=NfRetrieveResponse,
+ )
+
+ async def list(
+ self,
+ *,
+ region: str,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> NfListResponse:
+ """
+ To list NFS shares, send a GET request to `/v2/nfs?region=${region}`.
+
+ A successful request will return all NFS shares belonging to the authenticated
+ user.
+
+ Args:
+ region: The DigitalOcean region slug (e.g., nyc2, atl1) where the NFS share resides.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return await self._get(
+ "/v2/nfs" if self._client._base_url_overridden else "https://api.digitalocean.com/v2/nfs",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=await async_maybe_transform({"region": region}, nf_list_params.NfListParams),
+ ),
+ cast_to=NfListResponse,
+ )
+
+ async def delete(
+ self,
+ nfs_id: str,
+ *,
+ region: str,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> None:
+ """
+ To delete an NFS share, send a DELETE request to
+ `/v2/nfs/{nfs_id}?region=${region}`.
+
+ A successful request will return a `204 No Content` status code.
+
+ Args:
+ region: The DigitalOcean region slug (e.g., nyc2, atl1) where the NFS share resides.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not nfs_id:
+ raise ValueError(f"Expected a non-empty value for `nfs_id` but received {nfs_id!r}")
+ extra_headers = {"Accept": "*/*", **(extra_headers or {})}
+ return await self._delete(
+ f"/v2/nfs/{nfs_id}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/nfs/{nfs_id}",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=await async_maybe_transform({"region": region}, nf_delete_params.NfDeleteParams),
+ ),
+ cast_to=NoneType,
+ )
+
+ @overload
+ async def initiate_action(
+ self,
+ nfs_id: str,
+ *,
+ region: str,
+ type: Literal["resize", "snapshot"],
+ params: nf_initiate_action_params.NfsActionResizeParams | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> NfInitiateActionResponse:
+ """
+ To execute an action (such as resize) on a specified NFS share, send a POST
+ request to `/v2/nfs/{nfs_id}/actions`. In the JSON body of the request, set the
+ `type` attribute to one of the supported action types:
+
+ | Action | Details |
+ | ----------------------- | -------------------------------------------------------------------------- |
+ | `resize` | Resizes an NFS share. Set the `size_gib` attribute to the desired value in GiB |
+ | `snapshot` | Takes a snapshot of an NFS share |
+
+ Args:
+ region: The DigitalOcean region slug (e.g., atl1, nyc2) where the NFS share resides.
+
+ type: The type of action to initiate for the NFS share (such as resize or snapshot).
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ ...
+
+ @overload
+ async def initiate_action(
+ self,
+ nfs_id: str,
+ *,
+ region: str,
+ type: Literal["resize", "snapshot"],
+ params: nf_initiate_action_params.NfsActionSnapshotParams | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> NfInitiateActionResponse:
+ """
+ To execute an action (such as resize) on a specified NFS share, send a POST
+ request to `/v2/nfs/{nfs_id}/actions`. In the JSON body of the request, set the
+ `type` attribute to one of the supported action types:
+
+ | Action | Details |
+ | ----------------------- | -------------------------------------------------------------------------- |
+ | `resize` | Resizes an NFS share. Set the `size_gib` attribute to the desired value in GiB |
+ | `snapshot` | Takes a snapshot of an NFS share |
+
+ Args:
+ region: The DigitalOcean region slug (e.g., atl1, nyc2) where the NFS share resides.
+
+ type: The type of action to initiate for the NFS share (such as resize or snapshot).
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ ...
+
+ @required_args(["region", "type"])
+ async def initiate_action(
+ self,
+ nfs_id: str,
+ *,
+ region: str,
+ type: Literal["resize", "snapshot"],
+ params: nf_initiate_action_params.NfsActionResizeParams
+ | nf_initiate_action_params.NfsActionSnapshotParams
+ | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> NfInitiateActionResponse:
+ if not nfs_id:
+ raise ValueError(f"Expected a non-empty value for `nfs_id` but received {nfs_id!r}")
+ return await self._post(
+ f"/v2/nfs/{nfs_id}/actions"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/nfs/{nfs_id}/actions",
+ body=await async_maybe_transform(
+ {
+ "region": region,
+ "type": type,
+ "params": params,
+ },
+ nf_initiate_action_params.NfInitiateActionParams,
+ ),
+ options=make_request_options(
+ extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
+ ),
+ cast_to=NfInitiateActionResponse,
+ )
+
+
+class NfsResourceWithRawResponse:
+ def __init__(self, nfs: NfsResource) -> None:
+ self._nfs = nfs
+
+ self.create = to_raw_response_wrapper(
+ nfs.create,
+ )
+ self.retrieve = to_raw_response_wrapper(
+ nfs.retrieve,
+ )
+ self.list = to_raw_response_wrapper(
+ nfs.list,
+ )
+ self.delete = to_raw_response_wrapper(
+ nfs.delete,
+ )
+ self.initiate_action = to_raw_response_wrapper(
+ nfs.initiate_action,
+ )
+
+ @cached_property
+ def snapshots(self) -> SnapshotsResourceWithRawResponse:
+ return SnapshotsResourceWithRawResponse(self._nfs.snapshots)
+
+
+class AsyncNfsResourceWithRawResponse:
+ def __init__(self, nfs: AsyncNfsResource) -> None:
+ self._nfs = nfs
+
+ self.create = async_to_raw_response_wrapper(
+ nfs.create,
+ )
+ self.retrieve = async_to_raw_response_wrapper(
+ nfs.retrieve,
+ )
+ self.list = async_to_raw_response_wrapper(
+ nfs.list,
+ )
+ self.delete = async_to_raw_response_wrapper(
+ nfs.delete,
+ )
+ self.initiate_action = async_to_raw_response_wrapper(
+ nfs.initiate_action,
+ )
+
+ @cached_property
+ def snapshots(self) -> AsyncSnapshotsResourceWithRawResponse:
+ return AsyncSnapshotsResourceWithRawResponse(self._nfs.snapshots)
+
+
+class NfsResourceWithStreamingResponse:
+ def __init__(self, nfs: NfsResource) -> None:
+ self._nfs = nfs
+
+ self.create = to_streamed_response_wrapper(
+ nfs.create,
+ )
+ self.retrieve = to_streamed_response_wrapper(
+ nfs.retrieve,
+ )
+ self.list = to_streamed_response_wrapper(
+ nfs.list,
+ )
+ self.delete = to_streamed_response_wrapper(
+ nfs.delete,
+ )
+ self.initiate_action = to_streamed_response_wrapper(
+ nfs.initiate_action,
+ )
+
+ @cached_property
+ def snapshots(self) -> SnapshotsResourceWithStreamingResponse:
+ return SnapshotsResourceWithStreamingResponse(self._nfs.snapshots)
+
+
+class AsyncNfsResourceWithStreamingResponse:
+ def __init__(self, nfs: AsyncNfsResource) -> None:
+ self._nfs = nfs
+
+ self.create = async_to_streamed_response_wrapper(
+ nfs.create,
+ )
+ self.retrieve = async_to_streamed_response_wrapper(
+ nfs.retrieve,
+ )
+ self.list = async_to_streamed_response_wrapper(
+ nfs.list,
+ )
+ self.delete = async_to_streamed_response_wrapper(
+ nfs.delete,
+ )
+ self.initiate_action = async_to_streamed_response_wrapper(
+ nfs.initiate_action,
+ )
+
+ @cached_property
+ def snapshots(self) -> AsyncSnapshotsResourceWithStreamingResponse:
+ return AsyncSnapshotsResourceWithStreamingResponse(self._nfs.snapshots)
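Putting the share-level methods together, a hedged end-to-end sketch: the `params` payload for `resize` is inferred from the docstring's note about `size_gib` and is an assumption, as are the `client.nfs` accessor and all IDs shown:

```python
from gradient import Gradient

client = Gradient()

share = client.nfs.create(
    name="media-share",
    region="atl1",
    size_gib=50,           # documented minimum
    vpc_ids=["vpc-uuid"],  # hypothetical VPC ID
)

# Grow the share through the actions endpoint:
client.nfs.initiate_action(
    "nfs-id",  # hypothetical share ID
    region="atl1",
    type="resize",
    params={"size_gib": 100},  # assumed payload shape
)

# DELETE returns None on a 204 No Content:
client.nfs.delete("nfs-id", region="atl1")
```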
diff --git a/src/gradient/resources/nfs/snapshots.py b/src/gradient/resources/nfs/snapshots.py
new file mode 100644
index 00000000..65b56e03
--- /dev/null
+++ b/src/gradient/resources/nfs/snapshots.py
@@ -0,0 +1,418 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import httpx
+
+from ..._types import Body, Omit, Query, Headers, NoneType, NotGiven, omit, not_given
+from ..._utils import maybe_transform, async_maybe_transform
+from ..._compat import cached_property
+from ..._resource import SyncAPIResource, AsyncAPIResource
+from ..._response import (
+ to_raw_response_wrapper,
+ to_streamed_response_wrapper,
+ async_to_raw_response_wrapper,
+ async_to_streamed_response_wrapper,
+)
+from ...types.nfs import snapshot_list_params, snapshot_delete_params, snapshot_retrieve_params
+from ..._base_client import make_request_options
+from ...types.nfs.snapshot_list_response import SnapshotListResponse
+from ...types.nfs.snapshot_retrieve_response import SnapshotRetrieveResponse
+
+__all__ = ["SnapshotsResource", "AsyncSnapshotsResource"]
+
+
+class SnapshotsResource(SyncAPIResource):
+ @cached_property
+ def with_raw_response(self) -> SnapshotsResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
+ """
+ return SnapshotsResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> SnapshotsResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
+ """
+ return SnapshotsResourceWithStreamingResponse(self)
+
+ def retrieve(
+ self,
+ nfs_snapshot_id: str,
+ *,
+ region: str,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> SnapshotRetrieveResponse:
+ """
+ To get an NFS snapshot, send a GET request to
+ `/v2/nfs/snapshots/{nfs_snapshot_id}?region=${region}`.
+
+ A successful request will return the NFS snapshot.
+
+ Args:
+ region: The DigitalOcean region slug (e.g., nyc2, atl1) where the NFS share resides.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not nfs_snapshot_id:
+ raise ValueError(f"Expected a non-empty value for `nfs_snapshot_id` but received {nfs_snapshot_id!r}")
+ return self._get(
+ f"/v2/nfs/snapshots/{nfs_snapshot_id}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/nfs/snapshots/{nfs_snapshot_id}",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform({"region": region}, snapshot_retrieve_params.SnapshotRetrieveParams),
+ ),
+ cast_to=SnapshotRetrieveResponse,
+ )
+
+ def list(
+ self,
+ *,
+ region: str,
+ share_id: str | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> SnapshotListResponse:
+ """
+ To list all NFS snapshots, send a GET request to
+ `/v2/nfs/snapshots?region=${region}&share_id={share_id}`.
+
+ A successful request will return all NFS snapshots belonging to the
+ authenticated user in the specified region.
+
+ Optionally, you can filter snapshots by a specific NFS share by including the
+ `share_id` query parameter.
+
+ Args:
+ region: The DigitalOcean region slug (e.g., nyc2, atl1) where the NFS share resides.
+
+ share_id: The unique ID of an NFS share. If provided, only snapshots of this specific
+ share will be returned.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return self._get(
+ "/v2/nfs/snapshots"
+ if self._client._base_url_overridden
+ else "https://api.digitalocean.com/v2/nfs/snapshots",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform(
+ {
+ "region": region,
+ "share_id": share_id,
+ },
+ snapshot_list_params.SnapshotListParams,
+ ),
+ ),
+ cast_to=SnapshotListResponse,
+ )
+
+ def delete(
+ self,
+ nfs_snapshot_id: str,
+ *,
+ region: str,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> None:
+ """
+ To delete an NFS snapshot, send a DELETE request to
+ `/v2/nfs/snapshots/{nfs_snapshot_id}?region=${region}`.
+
+ A successful request will return a `204 No Content` status code.
+
+ Args:
+ region: The DigitalOcean region slug (e.g., nyc2, atl1) where the NFS share resides.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not nfs_snapshot_id:
+ raise ValueError(f"Expected a non-empty value for `nfs_snapshot_id` but received {nfs_snapshot_id!r}")
+ extra_headers = {"Accept": "*/*", **(extra_headers or {})}
+ return self._delete(
+ f"/v2/nfs/snapshots/{nfs_snapshot_id}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/nfs/snapshots/{nfs_snapshot_id}",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=maybe_transform({"region": region}, snapshot_delete_params.SnapshotDeleteParams),
+ ),
+ cast_to=NoneType,
+ )
+
+
+class AsyncSnapshotsResource(AsyncAPIResource):
+ @cached_property
+ def with_raw_response(self) -> AsyncSnapshotsResourceWithRawResponse:
+ """
+ This property can be used as a prefix for any HTTP method call to return
+ the raw response object instead of the parsed content.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#accessing-raw-response-data-eg-headers
+ """
+ return AsyncSnapshotsResourceWithRawResponse(self)
+
+ @cached_property
+ def with_streaming_response(self) -> AsyncSnapshotsResourceWithStreamingResponse:
+ """
+ An alternative to `.with_raw_response` that doesn't eagerly read the response body.
+
+ For more information, see https://www.github.com/digitalocean/gradient-python#with_streaming_response
+ """
+ return AsyncSnapshotsResourceWithStreamingResponse(self)
+
+ async def retrieve(
+ self,
+ nfs_snapshot_id: str,
+ *,
+ region: str,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> SnapshotRetrieveResponse:
+ """
+ To get an NFS snapshot, send a GET request to
+ `/v2/nfs/snapshots/{nfs_snapshot_id}?region=${region}`.
+
+ A successful request will return the NFS snapshot.
+
+ Args:
+ region: The DigitalOcean region slug (e.g., nyc2, atl1) where the NFS share resides.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not nfs_snapshot_id:
+ raise ValueError(f"Expected a non-empty value for `nfs_snapshot_id` but received {nfs_snapshot_id!r}")
+ return await self._get(
+ f"/v2/nfs/snapshots/{nfs_snapshot_id}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/nfs/snapshots/{nfs_snapshot_id}",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=await async_maybe_transform({"region": region}, snapshot_retrieve_params.SnapshotRetrieveParams),
+ ),
+ cast_to=SnapshotRetrieveResponse,
+ )
+
+ async def list(
+ self,
+ *,
+ region: str,
+ share_id: str | Omit = omit,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> SnapshotListResponse:
+ """
+ To list all NFS snapshots, send a GET request to
+        `/v2/nfs/snapshots?region=${region}&share_id=${share_id}`.
+
+ A successful request will return all NFS snapshots belonging to the
+ authenticated user in the specified region.
+
+ Optionally, you can filter snapshots by a specific NFS share by including the
+ `share_id` query parameter.
+
+ Args:
+ region: The DigitalOcean region slug (e.g., nyc2, atl1) where the NFS share resides.
+
+ share_id: The unique ID of an NFS share. If provided, only snapshots of this specific
+ share will be returned.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ return await self._get(
+ "/v2/nfs/snapshots"
+ if self._client._base_url_overridden
+ else "https://api.digitalocean.com/v2/nfs/snapshots",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=await async_maybe_transform(
+ {
+ "region": region,
+ "share_id": share_id,
+ },
+ snapshot_list_params.SnapshotListParams,
+ ),
+ ),
+ cast_to=SnapshotListResponse,
+ )
+
+ async def delete(
+ self,
+ nfs_snapshot_id: str,
+ *,
+ region: str,
+ # Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
+ # The extra values given here take precedence over values defined on the client or passed to this method.
+ extra_headers: Headers | None = None,
+ extra_query: Query | None = None,
+ extra_body: Body | None = None,
+ timeout: float | httpx.Timeout | None | NotGiven = not_given,
+ ) -> None:
+ """
+ To delete an NFS snapshot, send a DELETE request to
+ `/v2/nfs/snapshots/{nfs_snapshot_id}?region=${region}`.
+
+ A successful request will return a `204 No Content` status code.
+
+ Args:
+ region: The DigitalOcean region slug (e.g., nyc2, atl1) where the NFS share resides.
+
+ extra_headers: Send extra headers
+
+ extra_query: Add additional query parameters to the request
+
+ extra_body: Add additional JSON properties to the request
+
+ timeout: Override the client-level default timeout for this request, in seconds
+ """
+ if not nfs_snapshot_id:
+ raise ValueError(f"Expected a non-empty value for `nfs_snapshot_id` but received {nfs_snapshot_id!r}")
+ extra_headers = {"Accept": "*/*", **(extra_headers or {})}
+ return await self._delete(
+ f"/v2/nfs/snapshots/{nfs_snapshot_id}"
+ if self._client._base_url_overridden
+ else f"https://api.digitalocean.com/v2/nfs/snapshots/{nfs_snapshot_id}",
+ options=make_request_options(
+ extra_headers=extra_headers,
+ extra_query=extra_query,
+ extra_body=extra_body,
+ timeout=timeout,
+ query=await async_maybe_transform({"region": region}, snapshot_delete_params.SnapshotDeleteParams),
+ ),
+ cast_to=NoneType,
+ )
+
+
+class SnapshotsResourceWithRawResponse:
+ def __init__(self, snapshots: SnapshotsResource) -> None:
+ self._snapshots = snapshots
+
+ self.retrieve = to_raw_response_wrapper(
+ snapshots.retrieve,
+ )
+ self.list = to_raw_response_wrapper(
+ snapshots.list,
+ )
+ self.delete = to_raw_response_wrapper(
+ snapshots.delete,
+ )
+
+
+class AsyncSnapshotsResourceWithRawResponse:
+ def __init__(self, snapshots: AsyncSnapshotsResource) -> None:
+ self._snapshots = snapshots
+
+ self.retrieve = async_to_raw_response_wrapper(
+ snapshots.retrieve,
+ )
+ self.list = async_to_raw_response_wrapper(
+ snapshots.list,
+ )
+ self.delete = async_to_raw_response_wrapper(
+ snapshots.delete,
+ )
+
+
+class SnapshotsResourceWithStreamingResponse:
+ def __init__(self, snapshots: SnapshotsResource) -> None:
+ self._snapshots = snapshots
+
+ self.retrieve = to_streamed_response_wrapper(
+ snapshots.retrieve,
+ )
+ self.list = to_streamed_response_wrapper(
+ snapshots.list,
+ )
+ self.delete = to_streamed_response_wrapper(
+ snapshots.delete,
+ )
+
+
+class AsyncSnapshotsResourceWithStreamingResponse:
+ def __init__(self, snapshots: AsyncSnapshotsResource) -> None:
+ self._snapshots = snapshots
+
+ self.retrieve = async_to_streamed_response_wrapper(
+ snapshots.retrieve,
+ )
+ self.list = async_to_streamed_response_wrapper(
+ snapshots.list,
+ )
+ self.delete = async_to_streamed_response_wrapper(
+ snapshots.delete,
+ )
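
For orientation, a minimal usage sketch for the snapshots resource above. The `client.nfs.snapshots` accessor path is an assumption inferred from the `/v2/nfs/snapshots` endpoints, the IDs are placeholders, and credentials are assumed to come from the environment.

```python
# Hedged sketch: the `client.nfs.snapshots` path and the IDs below are
# assumptions; only the method signatures come from the resource above.
from gradient import Gradient

client = Gradient()  # assumes credentials are configured via the environment

# List snapshots in a region, optionally filtered to a single share.
listed = client.nfs.snapshots.list(region="atl1", share_id="example-share-id")
for snap in listed.snapshots or []:
    print(snap.id, snap.name, snap.status)

# Retrieve one snapshot, then delete it (delete returns None on 204).
snap = client.nfs.snapshots.retrieve("example-snapshot-id", region="atl1")
client.nfs.snapshots.delete("example-snapshot-id", region="atl1")
```
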
diff --git a/src/gradient/types/__init__.py b/src/gradient/types/__init__.py
index 1299779c..58d45641 100644
--- a/src/gradient/types/__init__.py
+++ b/src/gradient/types/__init__.py
@@ -52,17 +52,24 @@
from .api_model import APIModel as APIModel
from .api_agreement import APIAgreement as APIAgreement
from .api_workspace import APIWorkspace as APIWorkspace
+from .nf_list_params import NfListParams as NfListParams
from .api_agent_model import APIAgentModel as APIAgentModel
+from .nf_create_params import NfCreateParams as NfCreateParams
+from .nf_delete_params import NfDeleteParams as NfDeleteParams
+from .nf_list_response import NfListResponse as NfListResponse
from .agent_list_params import AgentListParams as AgentListParams
from .api_model_version import APIModelVersion as APIModelVersion
from .model_list_params import ModelListParams as ModelListParams
from .api_knowledge_base import APIKnowledgeBase as APIKnowledgeBase
+from .nf_create_response import NfCreateResponse as NfCreateResponse
+from .nf_retrieve_params import NfRetrieveParams as NfRetrieveParams
from .region_list_params import RegionListParams as RegionListParams
from .agent_create_params import AgentCreateParams as AgentCreateParams
from .agent_list_response import AgentListResponse as AgentListResponse
from .agent_update_params import AgentUpdateParams as AgentUpdateParams
from .model_list_response import ModelListResponse as ModelListResponse
from .api_retrieval_method import APIRetrievalMethod as APIRetrievalMethod
+from .nf_retrieve_response import NfRetrieveResponse as NfRetrieveResponse
from .region_list_response import RegionListResponse as RegionListResponse
from .agent_create_response import AgentCreateResponse as AgentCreateResponse
from .agent_delete_response import AgentDeleteResponse as AgentDeleteResponse
@@ -77,6 +84,7 @@
from .api_deployment_visibility import APIDeploymentVisibility as APIDeploymentVisibility
from .gpu_droplet_create_params import GPUDropletCreateParams as GPUDropletCreateParams
from .gpu_droplet_list_response import GPUDropletListResponse as GPUDropletListResponse
+from .nf_initiate_action_params import NfInitiateActionParams as NfInitiateActionParams
from .agent_update_status_params import (
AgentUpdateStatusParams as AgentUpdateStatusParams,
)
@@ -91,6 +99,7 @@
from .gpu_droplet_create_response import (
GPUDropletCreateResponse as GPUDropletCreateResponse,
)
+from .nf_initiate_action_response import NfInitiateActionResponse as NfInitiateActionResponse
from .agent_update_status_response import (
AgentUpdateStatusResponse as AgentUpdateStatusResponse,
)
@@ -153,6 +162,9 @@
agent_list_response, # type: ignore # noqa: F401
agent_move_response, # type: ignore # noqa: F401
)
+from .knowledge_base_list_indexing_jobs_response import (
+ KnowledgeBaseListIndexingJobsResponse as KnowledgeBaseListIndexingJobsResponse,
+)
# Rebuild cyclical models only after all modules are imported.
# This ensures that, when building the deferred (due to cyclical references) model schema,
diff --git a/src/gradient/types/agent_create_params.py b/src/gradient/types/agent_create_params.py
index db84a258..343c5d70 100644
--- a/src/gradient/types/agent_create_params.py
+++ b/src/gradient/types/agent_create_params.py
@@ -28,6 +28,8 @@ class AgentCreateParams(TypedDict, total=False):
knowledge_base_uuid: SequenceNotStr[str]
"""Ids of the knowledge base(s) to attach to the agent"""
+ model_provider_key_uuid: str
+
model_uuid: str
"""Identifier for the foundation model."""
@@ -45,3 +47,6 @@ class AgentCreateParams(TypedDict, total=False):
tags: SequenceNotStr[str]
"""Agent tag to organize related resources"""
+
+ workspace_uuid: str
+ """Identifier for the workspace"""
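
A hedged sketch of passing the two new `AgentCreateParams` fields; all fields here are optional (`total=False`) and the UUIDs are placeholders.

```python
# Hedged sketch: UUIDs are placeholders; only fields shown in the diff are used.
from gradient import Gradient

client = Gradient()

agent = client.agents.create(
    model_uuid="00000000-0000-0000-0000-000000000000",
    model_provider_key_uuid="00000000-0000-0000-0000-000000000000",  # new field
    workspace_uuid="00000000-0000-0000-0000-000000000000",  # new field
)
```
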
diff --git a/src/gradient/types/agent_list_response.py b/src/gradient/types/agent_list_response.py
index 7a64c66e..c461f152 100644
--- a/src/gradient/types/agent_list_response.py
+++ b/src/gradient/types/agent_list_response.py
@@ -24,6 +24,8 @@
class AgentChatbot(BaseModel):
+ allowed_domains: Optional[List[str]] = None
+
button_background_color: Optional[str] = None
logo: Optional[str] = None
@@ -61,6 +63,7 @@ class AgentDeployment(BaseModel):
"STATUS_UNDEPLOYING",
"STATUS_UNDEPLOYMENT_FAILED",
"STATUS_DELETED",
+ "STATUS_BUILDING",
]
] = None
diff --git a/src/gradient/types/agent_update_params.py b/src/gradient/types/agent_update_params.py
index 75c30cba..5026beaa 100644
--- a/src/gradient/types/agent_update_params.py
+++ b/src/gradient/types/agent_update_params.py
@@ -14,6 +14,12 @@
class AgentUpdateParams(TypedDict, total=False):
agent_log_insights_enabled: bool
+ allowed_domains: SequenceNotStr[str]
+ """
+    Optional list of allowed domains for the chatbot. Each must be a fully
+    qualified domain name (FQDN), such as https://example.com.
+ """
+
anthropic_key_uuid: str
"""Optional anthropic key uuid for use with anthropic models"""
@@ -41,6 +47,9 @@ class AgentUpdateParams(TypedDict, total=False):
response.
"""
+ model_provider_key_uuid: str
+ """Optional Model Provider uuid for use with provider models"""
+
model_uuid: str
"""Identifier for the foundation model."""
diff --git a/src/gradient/types/agents/evaluation_metrics/__init__.py b/src/gradient/types/agents/evaluation_metrics/__init__.py
index 01ce5ed2..ade2b376 100644
--- a/src/gradient/types/agents/evaluation_metrics/__init__.py
+++ b/src/gradient/types/agents/evaluation_metrics/__init__.py
@@ -18,6 +18,10 @@
from .oauth2_generate_url_params import Oauth2GenerateURLParams as Oauth2GenerateURLParams
from .workspace_retrieve_response import WorkspaceRetrieveResponse as WorkspaceRetrieveResponse
from .oauth2_generate_url_response import Oauth2GenerateURLResponse as Oauth2GenerateURLResponse
+from .scheduled_indexing_create_params import ScheduledIndexingCreateParams as ScheduledIndexingCreateParams
+from .scheduled_indexing_create_response import ScheduledIndexingCreateResponse as ScheduledIndexingCreateResponse
+from .scheduled_indexing_delete_response import ScheduledIndexingDeleteResponse as ScheduledIndexingDeleteResponse
+from .scheduled_indexing_retrieve_response import ScheduledIndexingRetrieveResponse as ScheduledIndexingRetrieveResponse
from .workspace_list_evaluation_test_cases_response import (
WorkspaceListEvaluationTestCasesResponse as WorkspaceListEvaluationTestCasesResponse,
)
diff --git a/src/gradient/types/agents/evaluation_metrics/scheduled_indexing_create_params.py b/src/gradient/types/agents/evaluation_metrics/scheduled_indexing_create_params.py
new file mode 100644
index 00000000..209766b4
--- /dev/null
+++ b/src/gradient/types/agents/evaluation_metrics/scheduled_indexing_create_params.py
@@ -0,0 +1,22 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Iterable
+from typing_extensions import TypedDict
+
+__all__ = ["ScheduledIndexingCreateParams"]
+
+
+class ScheduledIndexingCreateParams(TypedDict, total=False):
+ days: Iterable[int]
+    """Days for execution (days are represented as in a cron expression, e.g.
+
+    Monday begins with 1).
+    """
+
+ knowledge_base_uuid: str
+ """Knowledge base uuid for which the schedule is created"""
+
+ time: str
+ """Time of execution (HH:MM) UTC"""
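
Mirroring the generated tests later in this diff, a minimal sketch of creating a schedule; the UUID is a placeholder.

```python
# Minimal sketch mirroring the generated tests; the UUID is a placeholder.
from gradient import Gradient

client = Gradient()

schedule = client.agents.evaluation_metrics.scheduled_indexing.create(
    days=[1, 3, 5],  # cron-style days: Monday=1, Wednesday=3, Friday=5
    knowledge_base_uuid="123e4567-e89b-12d3-a456-426614174000",
    time="02:30",  # HH:MM, UTC
)
if schedule.indexing_info is not None:
    print(schedule.indexing_info.next_run_at)
```
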
diff --git a/src/gradient/types/agents/evaluation_metrics/scheduled_indexing_create_response.py b/src/gradient/types/agents/evaluation_metrics/scheduled_indexing_create_response.py
new file mode 100644
index 00000000..c306c5b1
--- /dev/null
+++ b/src/gradient/types/agents/evaluation_metrics/scheduled_indexing_create_response.py
@@ -0,0 +1,48 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+from datetime import datetime
+
+from ...._models import BaseModel
+
+__all__ = ["ScheduledIndexingCreateResponse", "IndexingInfo"]
+
+
+class IndexingInfo(BaseModel):
+ created_at: Optional[datetime] = None
+ """Created at timestamp"""
+
+ days: Optional[List[int]] = None
+    """Days for execution (days are represented as in a cron expression, e.g.
+
+    Monday begins with 1).
+    """
+
+ deleted_at: Optional[datetime] = None
+ """Deleted at timestamp (if soft deleted)"""
+
+ is_active: Optional[bool] = None
+ """Whether the schedule is currently active"""
+
+ knowledge_base_uuid: Optional[str] = None
+ """Knowledge base uuid associated with this schedule"""
+
+ last_ran_at: Optional[datetime] = None
+ """Last time the schedule was executed"""
+
+ next_run_at: Optional[datetime] = None
+ """Next scheduled run"""
+
+ time: Optional[str] = None
+ """Scheduled time of execution (HH:MM:SS format)"""
+
+ updated_at: Optional[datetime] = None
+ """Updated at timestamp"""
+
+ uuid: Optional[str] = None
+ """Unique identifier for the scheduled indexing entry"""
+
+
+class ScheduledIndexingCreateResponse(BaseModel):
+ indexing_info: Optional[IndexingInfo] = None
+ """Metadata for scheduled indexing entries"""
diff --git a/src/gradient/types/agents/evaluation_metrics/scheduled_indexing_delete_response.py b/src/gradient/types/agents/evaluation_metrics/scheduled_indexing_delete_response.py
new file mode 100644
index 00000000..febf3759
--- /dev/null
+++ b/src/gradient/types/agents/evaluation_metrics/scheduled_indexing_delete_response.py
@@ -0,0 +1,48 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+from datetime import datetime
+
+from ...._models import BaseModel
+
+__all__ = ["ScheduledIndexingDeleteResponse", "IndexingInfo"]
+
+
+class IndexingInfo(BaseModel):
+ created_at: Optional[datetime] = None
+ """Created at timestamp"""
+
+ days: Optional[List[int]] = None
+    """Days for execution (days are represented as in a cron expression, e.g.
+
+    Monday begins with 1).
+    """
+
+ deleted_at: Optional[datetime] = None
+ """Deleted at timestamp (if soft deleted)"""
+
+ is_active: Optional[bool] = None
+ """Whether the schedule is currently active"""
+
+ knowledge_base_uuid: Optional[str] = None
+ """Knowledge base uuid associated with this schedule"""
+
+ last_ran_at: Optional[datetime] = None
+ """Last time the schedule was executed"""
+
+ next_run_at: Optional[datetime] = None
+ """Next scheduled run"""
+
+ time: Optional[str] = None
+ """Scheduled time of execution (HH:MM:SS format)"""
+
+ updated_at: Optional[datetime] = None
+ """Updated at timestamp"""
+
+ uuid: Optional[str] = None
+ """Unique identifier for the scheduled indexing entry"""
+
+
+class ScheduledIndexingDeleteResponse(BaseModel):
+ indexing_info: Optional[IndexingInfo] = None
+ """Metadata for scheduled indexing entries"""
diff --git a/src/gradient/types/agents/evaluation_metrics/scheduled_indexing_retrieve_response.py b/src/gradient/types/agents/evaluation_metrics/scheduled_indexing_retrieve_response.py
new file mode 100644
index 00000000..1776c83d
--- /dev/null
+++ b/src/gradient/types/agents/evaluation_metrics/scheduled_indexing_retrieve_response.py
@@ -0,0 +1,48 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+from datetime import datetime
+
+from ...._models import BaseModel
+
+__all__ = ["ScheduledIndexingRetrieveResponse", "IndexingInfo"]
+
+
+class IndexingInfo(BaseModel):
+ created_at: Optional[datetime] = None
+ """Created at timestamp"""
+
+ days: Optional[List[int]] = None
+    """Days for execution (days are represented as in a cron expression, e.g.
+
+    Monday begins with 1).
+    """
+
+ deleted_at: Optional[datetime] = None
+ """Deleted at timestamp (if soft deleted)"""
+
+ is_active: Optional[bool] = None
+ """Whether the schedule is currently active"""
+
+ knowledge_base_uuid: Optional[str] = None
+ """Knowledge base uuid associated with this schedule"""
+
+ last_ran_at: Optional[datetime] = None
+ """Last time the schedule was executed"""
+
+ next_run_at: Optional[datetime] = None
+ """Next scheduled run"""
+
+ time: Optional[str] = None
+ """Scheduled time of execution (HH:MM:SS format)"""
+
+ updated_at: Optional[datetime] = None
+ """Updated at timestamp"""
+
+ uuid: Optional[str] = None
+ """Unique identifier for the scheduled indexing entry"""
+
+
+class ScheduledIndexingRetrieveResponse(BaseModel):
+ indexing_info: Optional[IndexingInfo] = None
+ """Metadata for scheduled indexing entries"""
diff --git a/src/gradient/types/api_agent.py b/src/gradient/types/api_agent.py
index abfbe828..f52e44c8 100644
--- a/src/gradient/types/api_agent.py
+++ b/src/gradient/types/api_agent.py
@@ -6,6 +6,8 @@
from datetime import datetime
from typing_extensions import Literal
+from pydantic import Field as FieldInfo
+
from .._models import BaseModel
from .api_agent_model import APIAgentModel
from .api_knowledge_base import APIKnowledgeBase
@@ -24,6 +26,7 @@
"Function",
"Guardrail",
"LoggingConfig",
+ "ModelProviderKey",
"Template",
"TemplateGuardrail",
]
@@ -35,6 +38,8 @@ class APIKey(BaseModel):
class Chatbot(BaseModel):
+ allowed_domains: Optional[List[str]] = None
+
button_background_color: Optional[str] = None
logo: Optional[str] = None
@@ -72,6 +77,7 @@ class Deployment(BaseModel):
"STATUS_UNDEPLOYING",
"STATUS_UNDEPLOYMENT_FAILED",
"STATUS_DELETED",
+ "STATUS_BUILDING",
]
] = None
@@ -186,6 +192,33 @@ class LoggingConfig(BaseModel):
"""Name of the log stream"""
+class ModelProviderKey(BaseModel):
+ api_key_uuid: Optional[str] = None
+ """API key ID"""
+
+ created_at: Optional[datetime] = None
+ """Key creation date"""
+
+ created_by: Optional[str] = None
+ """Created by user id from DO"""
+
+ deleted_at: Optional[datetime] = None
+ """Key deleted date"""
+
+ models: Optional[List[APIAgentModel]] = None
+    """Models supported by the OpenAI API key"""
+
+ name: Optional[str] = None
+ """Name of the key"""
+
+ provider: Optional[Literal["MODEL_PROVIDER_DIGITALOCEAN", "MODEL_PROVIDER_ANTHROPIC", "MODEL_PROVIDER_OPENAI"]] = (
+ None
+ )
+
+ updated_at: Optional[datetime] = None
+ """Key last updated date"""
+
+
class TemplateGuardrail(BaseModel):
priority: Optional[int] = None
"""Priority of the guardrail"""
@@ -311,6 +344,8 @@ class APIAgent(BaseModel):
model: Optional[APIAgentModel] = None
"""Description of a Model"""
+ api_model_provider_key: Optional[ModelProviderKey] = FieldInfo(alias="model_provider_key", default=None)
+
name: Optional[str] = None
"""Agent name"""
@@ -372,6 +407,11 @@ class APIAgent(BaseModel):
version_hash: Optional[str] = None
"""The latest version of the agent"""
+ vpc_egress_ips: Optional[List[str]] = None
+ """VPC Egress IPs"""
+
+ vpc_uuid: Optional[str] = None
+
workspace: Optional["APIWorkspace"] = None
diff --git a/src/gradient/types/knowledge_base_create_params.py b/src/gradient/types/knowledge_base_create_params.py
index e40bd598..4dc42098 100644
--- a/src/gradient/types/knowledge_base_create_params.py
+++ b/src/gradient/types/knowledge_base_create_params.py
@@ -11,7 +11,7 @@
from .knowledge_bases.api_file_upload_data_source_param import APIFileUploadDataSourceParam
from .knowledge_bases.api_web_crawler_data_source_param import APIWebCrawlerDataSourceParam
-__all__ = ["KnowledgeBaseCreateParams", "Datasource", "DatasourceDropboxDataSource"]
+__all__ = ["KnowledgeBaseCreateParams", "Datasource", "DatasourceDropboxDataSource", "DatasourceGoogleDriveDataSource"]
class KnowledgeBaseCreateParams(TypedDict, total=False):
@@ -63,6 +63,17 @@ class DatasourceDropboxDataSource(TypedDict, total=False):
"""
+class DatasourceGoogleDriveDataSource(TypedDict, total=False):
+ folder_id: str
+
+ refresh_token: str
+ """Refresh token.
+
+    You can obtain a refresh token by following the OAuth2 flow. See
+    /v2/gen-ai/oauth2/google/tokens for reference.
+ """
+
+
class Datasource(TypedDict, total=False):
aws_data_source: AwsDataSourceParam
"""AWS S3 Data Source"""
@@ -79,6 +90,9 @@ class Datasource(TypedDict, total=False):
file_upload_data_source: APIFileUploadDataSourceParam
"""File to upload as data source for knowledge base."""
+ google_drive_data_source: DatasourceGoogleDriveDataSource
+ """Google Drive Data Source"""
+
item_path: str
spaces_data_source: APISpacesDataSourceParam
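
A hedged sketch of wiring the new Google Drive data source into knowledge base creation. The `name` and `datasources` parameter names are assumptions (they are not shown in this hunk), and the folder ID and refresh token are placeholders obtained via the OAuth2 flow referenced above.

```python
# Hedged sketch: `name`/`datasources` parameter names and the values below are
# assumptions; refresh tokens come from the /v2/gen-ai/oauth2/google/tokens flow.
from gradient import Gradient

client = Gradient()

kb = client.knowledge_bases.create(
    name="drive-docs",
    datasources=[
        {
            "google_drive_data_source": {
                "folder_id": "example-folder-id",
                "refresh_token": "example-refresh-token",
            }
        }
    ],
)
```
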
diff --git a/src/gradient/types/knowledge_base_list_indexing_jobs_response.py b/src/gradient/types/knowledge_base_list_indexing_jobs_response.py
new file mode 100644
index 00000000..d88f83fc
--- /dev/null
+++ b/src/gradient/types/knowledge_base_list_indexing_jobs_response.py
@@ -0,0 +1,21 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+
+from .._models import BaseModel
+from .shared.api_meta import APIMeta
+from .shared.api_links import APILinks
+from .knowledge_bases.api_indexing_job import APIIndexingJob
+
+__all__ = ["KnowledgeBaseListIndexingJobsResponse"]
+
+
+class KnowledgeBaseListIndexingJobsResponse(BaseModel):
+ jobs: Optional[List[APIIndexingJob]] = None
+ """The indexing jobs"""
+
+ links: Optional[APILinks] = None
+ """Links to other pages"""
+
+ meta: Optional[APIMeta] = None
+ """Meta information about the data set"""
diff --git a/src/gradient/types/knowledge_bases/__init__.py b/src/gradient/types/knowledge_bases/__init__.py
index cab865fa..a8ce2cc7 100644
--- a/src/gradient/types/knowledge_bases/__init__.py
+++ b/src/gradient/types/knowledge_bases/__init__.py
@@ -27,6 +27,9 @@
from .data_source_create_presigned_urls_params import (
DataSourceCreatePresignedURLsParams as DataSourceCreatePresignedURLsParams,
)
+from .indexing_job_retrieve_signed_url_response import (
+ IndexingJobRetrieveSignedURLResponse as IndexingJobRetrieveSignedURLResponse,
+)
from .data_source_create_presigned_urls_response import (
DataSourceCreatePresignedURLsResponse as DataSourceCreatePresignedURLsResponse,
)
diff --git a/src/gradient/types/knowledge_bases/api_indexed_data_source.py b/src/gradient/types/knowledge_bases/api_indexed_data_source.py
index 151b29de..3f011582 100644
--- a/src/gradient/types/knowledge_bases/api_indexed_data_source.py
+++ b/src/gradient/types/knowledge_bases/api_indexed_data_source.py
@@ -48,6 +48,7 @@ class APIIndexedDataSource(BaseModel):
"DATA_SOURCE_STATUS_PARTIALLY_UPDATED",
"DATA_SOURCE_STATUS_NOT_UPDATED",
"DATA_SOURCE_STATUS_FAILED",
+ "DATA_SOURCE_STATUS_CANCELLED",
]
] = None
diff --git a/src/gradient/types/knowledge_bases/api_indexing_job.py b/src/gradient/types/knowledge_bases/api_indexing_job.py
index 312e465c..93124cf8 100644
--- a/src/gradient/types/knowledge_bases/api_indexing_job.py
+++ b/src/gradient/types/knowledge_bases/api_indexing_job.py
@@ -5,6 +5,7 @@
from typing_extensions import Literal
from ..._models import BaseModel
+from .api_indexed_data_source import APIIndexedDataSource
__all__ = ["APIIndexingJob"]
@@ -16,10 +17,16 @@ class APIIndexingJob(BaseModel):
created_at: Optional[datetime] = None
"""Creation date / time"""
+ data_source_jobs: Optional[List[APIIndexedDataSource]] = None
+ """Details on Data Sources included in the Indexing Job"""
+
data_source_uuids: Optional[List[str]] = None
finished_at: Optional[datetime] = None
+ is_report_available: Optional[bool] = None
+    """Whether the indexing job details are available"""
+
knowledge_base_uuid: Optional[str] = None
"""Knowledge base id"""
@@ -50,7 +57,7 @@ class APIIndexingJob(BaseModel):
] = None
tokens: Optional[int] = None
- """Number of tokens"""
+ """Number of tokens [This field is deprecated]"""
total_datasources: Optional[int] = None
"""Number of datasources being indexed"""
@@ -61,9 +68,15 @@ class APIIndexingJob(BaseModel):
total_items_indexed: Optional[str] = None
"""Total Items Indexed"""
+ total_items_removed: Optional[str] = None
+ """Total Items Removed"""
+
total_items_skipped: Optional[str] = None
"""Total Items Skipped"""
+ total_tokens: Optional[str] = None
+ """Total Tokens Consumed By the Indexing Job"""
+
updated_at: Optional[datetime] = None
"""Last modified"""
diff --git a/src/gradient/types/knowledge_bases/api_knowledge_base_data_source.py b/src/gradient/types/knowledge_bases/api_knowledge_base_data_source.py
index ed370eb5..223797c7 100644
--- a/src/gradient/types/knowledge_bases/api_knowledge_base_data_source.py
+++ b/src/gradient/types/knowledge_bases/api_knowledge_base_data_source.py
@@ -10,7 +10,7 @@
from .api_file_upload_data_source import APIFileUploadDataSource
from .api_web_crawler_data_source import APIWebCrawlerDataSource
-__all__ = ["APIKnowledgeBaseDataSource", "AwsDataSource", "DropboxDataSource"]
+__all__ = ["APIKnowledgeBaseDataSource", "AwsDataSource", "DropboxDataSource", "GoogleDriveDataSource"]
class AwsDataSource(BaseModel):
@@ -27,6 +27,13 @@ class DropboxDataSource(BaseModel):
folder: Optional[str] = None
+class GoogleDriveDataSource(BaseModel):
+ folder_id: Optional[str] = None
+
+ folder_name: Optional[str] = None
+ """Name of the selected folder if available"""
+
+
class APIKnowledgeBaseDataSource(BaseModel):
aws_data_source: Optional[AwsDataSource] = None
"""AWS S3 Data Source for Display"""
@@ -43,6 +50,9 @@ class APIKnowledgeBaseDataSource(BaseModel):
file_upload_data_source: Optional[APIFileUploadDataSource] = None
"""File to upload as data source for knowledge base."""
+ google_drive_data_source: Optional[GoogleDriveDataSource] = None
+ """Google Drive Data Source for Display"""
+
item_path: Optional[str] = None
"""Path of folder or object in bucket - Deprecated, moved to data_source_details"""
diff --git a/src/gradient/types/knowledge_bases/api_web_crawler_data_source.py b/src/gradient/types/knowledge_bases/api_web_crawler_data_source.py
index 4690c607..63c9111a 100644
--- a/src/gradient/types/knowledge_bases/api_web_crawler_data_source.py
+++ b/src/gradient/types/knowledge_bases/api_web_crawler_data_source.py
@@ -1,6 +1,6 @@
# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
-from typing import Optional
+from typing import List, Optional
from typing_extensions import Literal
from ..._models import BaseModel
@@ -24,3 +24,6 @@ class APIWebCrawlerDataSource(BaseModel):
embed_media: Optional[bool] = None
"""Whether to ingest and index media (images, etc.) on web pages."""
+
+ exclude_tags: Optional[List[str]] = None
+    """Tags to exclude from web pages while web crawling"""
diff --git a/src/gradient/types/knowledge_bases/api_web_crawler_data_source_param.py b/src/gradient/types/knowledge_bases/api_web_crawler_data_source_param.py
index 2345ed3a..17988e73 100644
--- a/src/gradient/types/knowledge_bases/api_web_crawler_data_source_param.py
+++ b/src/gradient/types/knowledge_bases/api_web_crawler_data_source_param.py
@@ -4,6 +4,8 @@
from typing_extensions import Literal, TypedDict
+from ..._types import SequenceNotStr
+
__all__ = ["APIWebCrawlerDataSourceParam"]
@@ -23,3 +25,6 @@ class APIWebCrawlerDataSourceParam(TypedDict, total=False):
embed_media: bool
"""Whether to ingest and index media (images, etc.) on web pages."""
+
+ exclude_tags: SequenceNotStr[str]
+    """Tags to exclude from web pages while web crawling"""
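
Since the param type is a plain `TypedDict`, the new `exclude_tags` field is set as a dict key. A small sketch, assuming the type is re-exported from `gradient.types.knowledge_bases`; the tag values are illustrative.

```python
# Sketch of the new field; TypedDicts are plain dicts at runtime, so this is
# just a typed literal. The import path and tag values are assumptions.
from gradient.types.knowledge_bases import APIWebCrawlerDataSourceParam

crawler: APIWebCrawlerDataSourceParam = {
    "base_url": "https://docs.example.com",
    "embed_media": True,
    "exclude_tags": ["nav", "footer"],  # skip these tags while crawling
}
```
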
diff --git a/src/gradient/types/knowledge_bases/indexing_job_retrieve_signed_url_response.py b/src/gradient/types/knowledge_bases/indexing_job_retrieve_signed_url_response.py
new file mode 100644
index 00000000..2ef60e45
--- /dev/null
+++ b/src/gradient/types/knowledge_bases/indexing_job_retrieve_signed_url_response.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+
+from ..._models import BaseModel
+
+__all__ = ["IndexingJobRetrieveSignedURLResponse"]
+
+
+class IndexingJobRetrieveSignedURLResponse(BaseModel):
+ signed_url: Optional[str] = None
+ """The signed url for downloading the indexing job details"""
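
Grounded in the new tests below, a minimal sketch of fetching the signed report URL for an indexing job and downloading it; httpx is already a dependency of this SDK, and the UUID is a placeholder.

```python
# Minimal sketch; the UUID is a placeholder. httpx is an existing SDK dependency.
import httpx

from gradient import Gradient

client = Gradient()

resp = client.knowledge_bases.indexing_jobs.retrieve_signed_url(
    "123e4567-e89b-12d3-a456-426614174000",
)
if resp.signed_url is not None:
    report = httpx.get(resp.signed_url)
    print(report.status_code, len(report.content))
```
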
diff --git a/src/gradient/types/nf_create_params.py b/src/gradient/types/nf_create_params.py
new file mode 100644
index 00000000..327beb2e
--- /dev/null
+++ b/src/gradient/types/nf_create_params.py
@@ -0,0 +1,23 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Required, TypedDict
+
+from .._types import SequenceNotStr
+
+__all__ = ["NfCreateParams"]
+
+
+class NfCreateParams(TypedDict, total=False):
+ name: Required[str]
+ """The human-readable name of the share."""
+
+ region: Required[str]
+ """The DigitalOcean region slug (e.g., nyc2, atl1) where the NFS share resides."""
+
+ size_gib: Required[int]
+ """The desired/provisioned size of the share in GiB (Gibibytes). Must be >= 50."""
+
+ vpc_ids: Required[SequenceNotStr[str]]
+ """List of VPC IDs that should be able to access the share."""
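
A hedged sketch of provisioning a share with these params. The `client.nfs.create` accessor is an assumption inferred from the `NfCreateParams`/`NfCreateResponse` pair, and the VPC ID is a placeholder.

```python
# Hedged sketch: the `client.nfs.create` accessor is inferred from the
# NfCreateParams/NfCreateResponse pair; the VPC ID is a placeholder.
from gradient import Gradient

client = Gradient()

created = client.nfs.create(
    name="shared-data",
    region="atl1",
    size_gib=50,  # minimum provisioned size is 50 GiB
    vpc_ids=["00000000-0000-0000-0000-000000000000"],
)
if created.share is not None:
    print(created.share.id, created.share.status)
```
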
diff --git a/src/gradient/types/nf_create_response.py b/src/gradient/types/nf_create_response.py
new file mode 100644
index 00000000..5016d776
--- /dev/null
+++ b/src/gradient/types/nf_create_response.py
@@ -0,0 +1,45 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+from datetime import datetime
+from typing_extensions import Literal
+
+from .._models import BaseModel
+
+__all__ = ["NfCreateResponse", "Share"]
+
+
+class Share(BaseModel):
+ id: str
+ """The unique identifier of the NFS share."""
+
+ created_at: datetime
+ """Timestamp for when the NFS share was created."""
+
+ name: str
+ """The human-readable name of the share."""
+
+ region: str
+ """The DigitalOcean region slug (e.g., nyc2, atl1) where the NFS share resides."""
+
+ size_gib: int
+ """The desired/provisioned size of the share in GiB (Gibibytes). Must be >= 50."""
+
+ status: Literal["CREATING", "ACTIVE", "FAILED", "DELETED"]
+ """The current status of the share."""
+
+ host: Optional[str] = None
+ """The host IP of the NFS server that will be accessible from the associated VPC"""
+
+ mount_path: Optional[str] = None
+ """
+    Path at which the share will be available; it can be mounted at a target of
+    the user's choice on the client.
+ """
+
+ vpc_ids: Optional[List[str]] = None
+ """List of VPC IDs that should be able to access the share."""
+
+
+class NfCreateResponse(BaseModel):
+ share: Optional[Share] = None
diff --git a/src/gradient/types/nf_delete_params.py b/src/gradient/types/nf_delete_params.py
new file mode 100644
index 00000000..a11474e5
--- /dev/null
+++ b/src/gradient/types/nf_delete_params.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Required, TypedDict
+
+__all__ = ["NfDeleteParams"]
+
+
+class NfDeleteParams(TypedDict, total=False):
+ region: Required[str]
+ """The DigitalOcean region slug (e.g., nyc2, atl1) where the NFS share resides."""
diff --git a/src/gradient/types/nf_initiate_action_params.py b/src/gradient/types/nf_initiate_action_params.py
new file mode 100644
index 00000000..a187f56d
--- /dev/null
+++ b/src/gradient/types/nf_initiate_action_params.py
@@ -0,0 +1,47 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Union
+from typing_extensions import Literal, Required, TypeAlias, TypedDict
+
+__all__ = [
+ "NfInitiateActionParams",
+ "NfsActionResize",
+ "NfsActionResizeParams",
+ "NfsActionSnapshot",
+ "NfsActionSnapshotParams",
+]
+
+
+class NfsActionResize(TypedDict, total=False):
+ region: Required[str]
+    """The DigitalOcean region slug (e.g. atl1, nyc2) where the NFS share resides."""
+
+ type: Required[Literal["resize", "snapshot"]]
+ """The type of action to initiate for the NFS share (such as resize or snapshot)."""
+
+ params: NfsActionResizeParams
+
+
+class NfsActionResizeParams(TypedDict, total=False):
+ size_gib: Required[int]
+ """The new size for the NFS share."""
+
+
+class NfsActionSnapshot(TypedDict, total=False):
+ region: Required[str]
+    """The DigitalOcean region slug (e.g. atl1, nyc2) where the NFS share resides."""
+
+ type: Required[Literal["resize", "snapshot"]]
+ """The type of action to initiate for the NFS share (such as resize or snapshot)."""
+
+ params: NfsActionSnapshotParams
+
+
+class NfsActionSnapshotParams(TypedDict, total=False):
+ name: Required[str]
+ """Snapshot name of the NFS share"""
+
+
+NfInitiateActionParams: TypeAlias = Union[NfsActionResize, NfsActionSnapshot]
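
A hedged sketch of the two payload shapes in the union above; the `client.nfs.initiate_action` accessor and the share ID are assumptions.

```python
# Hedged sketch: the `client.nfs.initiate_action` accessor and the share ID
# are assumptions; the payload shapes follow the union above.
from gradient import Gradient

client = Gradient()

# Grow the share to 100 GiB.
resize = client.nfs.initiate_action(
    "example-share-id",
    region="atl1",
    type="resize",
    params={"size_gib": 100},
)

# Or take a named snapshot of the share.
snapshot = client.nfs.initiate_action(
    "example-share-id",
    region="atl1",
    type="snapshot",
    params={"name": "nightly-backup"},
)
print(resize.action.status, snapshot.action.status)
```
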
diff --git a/src/gradient/types/nf_initiate_action_response.py b/src/gradient/types/nf_initiate_action_response.py
new file mode 100644
index 00000000..9f38a4b2
--- /dev/null
+++ b/src/gradient/types/nf_initiate_action_response.py
@@ -0,0 +1,33 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from datetime import datetime
+from typing_extensions import Literal
+
+from .._models import BaseModel
+
+__all__ = ["NfInitiateActionResponse", "Action"]
+
+
+class Action(BaseModel):
+ region_slug: str
+ """The DigitalOcean region slug where the resource is located."""
+
+ resource_id: str
+ """The unique identifier of the resource on which the action is being performed."""
+
+ resource_type: Literal["network_file_share", "network_file_share_snapshot"]
+ """The type of resource on which the action is being performed."""
+
+ started_at: datetime
+ """The timestamp when the action was started."""
+
+ status: Literal["in-progress", "completed", "errored"]
+ """The current status of the action."""
+
+ type: str
+ """The type of action being performed."""
+
+
+class NfInitiateActionResponse(BaseModel):
+ action: Action
+ """The action that was submitted."""
diff --git a/src/gradient/types/nf_list_params.py b/src/gradient/types/nf_list_params.py
new file mode 100644
index 00000000..bc53c284
--- /dev/null
+++ b/src/gradient/types/nf_list_params.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Required, TypedDict
+
+__all__ = ["NfListParams"]
+
+
+class NfListParams(TypedDict, total=False):
+ region: Required[str]
+ """The DigitalOcean region slug (e.g., nyc2, atl1) where the NFS share resides."""
diff --git a/src/gradient/types/nf_list_response.py b/src/gradient/types/nf_list_response.py
new file mode 100644
index 00000000..c5af118b
--- /dev/null
+++ b/src/gradient/types/nf_list_response.py
@@ -0,0 +1,45 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+from datetime import datetime
+from typing_extensions import Literal
+
+from .._models import BaseModel
+
+__all__ = ["NfListResponse", "Share"]
+
+
+class Share(BaseModel):
+ id: str
+ """The unique identifier of the NFS share."""
+
+ created_at: datetime
+ """Timestamp for when the NFS share was created."""
+
+ name: str
+ """The human-readable name of the share."""
+
+ region: str
+ """The DigitalOcean region slug (e.g., nyc2, atl1) where the NFS share resides."""
+
+ size_gib: int
+ """The desired/provisioned size of the share in GiB (Gibibytes). Must be >= 50."""
+
+ status: Literal["CREATING", "ACTIVE", "FAILED", "DELETED"]
+ """The current status of the share."""
+
+ host: Optional[str] = None
+ """The host IP of the NFS server that will be accessible from the associated VPC"""
+
+ mount_path: Optional[str] = None
+ """
+    Path at which the share will be available; it can be mounted at a target of
+    the user's choice on the client.
+ """
+
+ vpc_ids: Optional[List[str]] = None
+ """List of VPC IDs that should be able to access the share."""
+
+
+class NfListResponse(BaseModel):
+ shares: Optional[List[Share]] = None
diff --git a/src/gradient/types/nf_retrieve_params.py b/src/gradient/types/nf_retrieve_params.py
new file mode 100644
index 00000000..292053d9
--- /dev/null
+++ b/src/gradient/types/nf_retrieve_params.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Required, TypedDict
+
+__all__ = ["NfRetrieveParams"]
+
+
+class NfRetrieveParams(TypedDict, total=False):
+ region: Required[str]
+ """The DigitalOcean region slug (e.g., nyc2, atl1) where the NFS share resides."""
diff --git a/src/gradient/types/nf_retrieve_response.py b/src/gradient/types/nf_retrieve_response.py
new file mode 100644
index 00000000..897f07f0
--- /dev/null
+++ b/src/gradient/types/nf_retrieve_response.py
@@ -0,0 +1,45 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+from datetime import datetime
+from typing_extensions import Literal
+
+from .._models import BaseModel
+
+__all__ = ["NfRetrieveResponse", "Share"]
+
+
+class Share(BaseModel):
+ id: str
+ """The unique identifier of the NFS share."""
+
+ created_at: datetime
+ """Timestamp for when the NFS share was created."""
+
+ name: str
+ """The human-readable name of the share."""
+
+ region: str
+ """The DigitalOcean region slug (e.g., nyc2, atl1) where the NFS share resides."""
+
+ size_gib: int
+ """The desired/provisioned size of the share in GiB (Gibibytes). Must be >= 50."""
+
+ status: Literal["CREATING", "ACTIVE", "FAILED", "DELETED"]
+ """The current status of the share."""
+
+ host: Optional[str] = None
+ """The host IP of the NFS server that will be accessible from the associated VPC"""
+
+ mount_path: Optional[str] = None
+ """
+    Path at which the share will be available; it can be mounted at a target of
+    the user's choice on the client.
+ """
+
+ vpc_ids: Optional[List[str]] = None
+ """List of VPC IDs that should be able to access the share."""
+
+
+class NfRetrieveResponse(BaseModel):
+ share: Optional[Share] = None
diff --git a/src/gradient/types/nfs/__init__.py b/src/gradient/types/nfs/__init__.py
new file mode 100644
index 00000000..41777980
--- /dev/null
+++ b/src/gradient/types/nfs/__init__.py
@@ -0,0 +1,9 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from .snapshot_list_params import SnapshotListParams as SnapshotListParams
+from .snapshot_delete_params import SnapshotDeleteParams as SnapshotDeleteParams
+from .snapshot_list_response import SnapshotListResponse as SnapshotListResponse
+from .snapshot_retrieve_params import SnapshotRetrieveParams as SnapshotRetrieveParams
+from .snapshot_retrieve_response import SnapshotRetrieveResponse as SnapshotRetrieveResponse
diff --git a/src/gradient/types/nfs/snapshot_delete_params.py b/src/gradient/types/nfs/snapshot_delete_params.py
new file mode 100644
index 00000000..1b26149e
--- /dev/null
+++ b/src/gradient/types/nfs/snapshot_delete_params.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Required, TypedDict
+
+__all__ = ["SnapshotDeleteParams"]
+
+
+class SnapshotDeleteParams(TypedDict, total=False):
+ region: Required[str]
+ """The DigitalOcean region slug (e.g., nyc2, atl1) where the NFS share resides."""
diff --git a/src/gradient/types/nfs/snapshot_list_params.py b/src/gradient/types/nfs/snapshot_list_params.py
new file mode 100644
index 00000000..8c4c6946
--- /dev/null
+++ b/src/gradient/types/nfs/snapshot_list_params.py
@@ -0,0 +1,18 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Required, TypedDict
+
+__all__ = ["SnapshotListParams"]
+
+
+class SnapshotListParams(TypedDict, total=False):
+ region: Required[str]
+ """The DigitalOcean region slug (e.g., nyc2, atl1) where the NFS share resides."""
+
+ share_id: str
+ """The unique ID of an NFS share.
+
+ If provided, only snapshots of this specific share will be returned.
+ """
diff --git a/src/gradient/types/nfs/snapshot_list_response.py b/src/gradient/types/nfs/snapshot_list_response.py
new file mode 100644
index 00000000..8a6864dc
--- /dev/null
+++ b/src/gradient/types/nfs/snapshot_list_response.py
@@ -0,0 +1,36 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+from datetime import datetime
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["SnapshotListResponse", "Snapshot"]
+
+
+class Snapshot(BaseModel):
+ id: str
+ """The unique identifier of the snapshot."""
+
+ created_at: datetime
+ """The timestamp when the snapshot was created."""
+
+ name: str
+ """The human-readable name of the snapshot."""
+
+ region: str
+ """The DigitalOcean region slug where the snapshot is located."""
+
+ share_id: str
+ """The unique identifier of the share from which this snapshot was created."""
+
+ size_gib: int
+ """The size of the snapshot in GiB."""
+
+ status: Literal["UNKNOWN", "CREATING", "ACTIVE", "FAILED", "DELETED"]
+ """The current status of the snapshot."""
+
+
+class SnapshotListResponse(BaseModel):
+ snapshots: Optional[List[Snapshot]] = None
diff --git a/src/gradient/types/nfs/snapshot_retrieve_params.py b/src/gradient/types/nfs/snapshot_retrieve_params.py
new file mode 100644
index 00000000..d1e1f8e8
--- /dev/null
+++ b/src/gradient/types/nfs/snapshot_retrieve_params.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Required, TypedDict
+
+__all__ = ["SnapshotRetrieveParams"]
+
+
+class SnapshotRetrieveParams(TypedDict, total=False):
+ region: Required[str]
+ """The DigitalOcean region slug (e.g., nyc2, atl1) where the NFS share resides."""
diff --git a/src/gradient/types/nfs/snapshot_retrieve_response.py b/src/gradient/types/nfs/snapshot_retrieve_response.py
new file mode 100644
index 00000000..2d54d523
--- /dev/null
+++ b/src/gradient/types/nfs/snapshot_retrieve_response.py
@@ -0,0 +1,37 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import Optional
+from datetime import datetime
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["SnapshotRetrieveResponse", "Snapshot"]
+
+
+class Snapshot(BaseModel):
+ id: str
+ """The unique identifier of the snapshot."""
+
+ created_at: datetime
+ """The timestamp when the snapshot was created."""
+
+ name: str
+ """The human-readable name of the snapshot."""
+
+ region: str
+ """The DigitalOcean region slug where the snapshot is located."""
+
+ share_id: str
+ """The unique identifier of the share from which this snapshot was created."""
+
+ size_gib: int
+ """The size of the snapshot in GiB."""
+
+ status: Literal["UNKNOWN", "CREATING", "ACTIVE", "FAILED", "DELETED"]
+ """The current status of the snapshot."""
+
+
+class SnapshotRetrieveResponse(BaseModel):
+ snapshot: Optional[Snapshot] = None
+ """Represents an NFS snapshot."""
diff --git a/tests/api_resources/agents/evaluation_metrics/test_scheduled_indexing.py b/tests/api_resources/agents/evaluation_metrics/test_scheduled_indexing.py
new file mode 100644
index 00000000..388e06c9
--- /dev/null
+++ b/tests/api_resources/agents/evaluation_metrics/test_scheduled_indexing.py
@@ -0,0 +1,274 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import os
+from typing import Any, cast
+
+import pytest
+
+from gradient import Gradient, AsyncGradient
+from tests.utils import assert_matches_type
+from gradient.types.agents.evaluation_metrics import (
+ ScheduledIndexingCreateResponse,
+ ScheduledIndexingDeleteResponse,
+ ScheduledIndexingRetrieveResponse,
+)
+
+base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
+
+
+class TestScheduledIndexing:
+ parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ def test_method_create(self, client: Gradient) -> None:
+ scheduled_indexing = client.agents.evaluation_metrics.scheduled_indexing.create()
+ assert_matches_type(ScheduledIndexingCreateResponse, scheduled_indexing, path=["response"])
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ def test_method_create_with_all_params(self, client: Gradient) -> None:
+ scheduled_indexing = client.agents.evaluation_metrics.scheduled_indexing.create(
+ days=[123],
+ knowledge_base_uuid="123e4567-e89b-12d3-a456-426614174000",
+ time="example string",
+ )
+ assert_matches_type(ScheduledIndexingCreateResponse, scheduled_indexing, path=["response"])
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ def test_raw_response_create(self, client: Gradient) -> None:
+ response = client.agents.evaluation_metrics.scheduled_indexing.with_raw_response.create()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ scheduled_indexing = response.parse()
+ assert_matches_type(ScheduledIndexingCreateResponse, scheduled_indexing, path=["response"])
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ def test_streaming_response_create(self, client: Gradient) -> None:
+ with client.agents.evaluation_metrics.scheduled_indexing.with_streaming_response.create() as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ scheduled_indexing = response.parse()
+ assert_matches_type(ScheduledIndexingCreateResponse, scheduled_indexing, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ def test_method_retrieve(self, client: Gradient) -> None:
+ scheduled_indexing = client.agents.evaluation_metrics.scheduled_indexing.retrieve(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ )
+ assert_matches_type(ScheduledIndexingRetrieveResponse, scheduled_indexing, path=["response"])
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ def test_raw_response_retrieve(self, client: Gradient) -> None:
+ response = client.agents.evaluation_metrics.scheduled_indexing.with_raw_response.retrieve(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ scheduled_indexing = response.parse()
+ assert_matches_type(ScheduledIndexingRetrieveResponse, scheduled_indexing, path=["response"])
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ def test_streaming_response_retrieve(self, client: Gradient) -> None:
+ with client.agents.evaluation_metrics.scheduled_indexing.with_streaming_response.retrieve(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ scheduled_indexing = response.parse()
+ assert_matches_type(ScheduledIndexingRetrieveResponse, scheduled_indexing, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ def test_path_params_retrieve(self, client: Gradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `knowledge_base_uuid` but received ''"):
+ client.agents.evaluation_metrics.scheduled_indexing.with_raw_response.retrieve(
+ "",
+ )
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ def test_method_delete(self, client: Gradient) -> None:
+ scheduled_indexing = client.agents.evaluation_metrics.scheduled_indexing.delete(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ )
+ assert_matches_type(ScheduledIndexingDeleteResponse, scheduled_indexing, path=["response"])
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ def test_raw_response_delete(self, client: Gradient) -> None:
+ response = client.agents.evaluation_metrics.scheduled_indexing.with_raw_response.delete(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ scheduled_indexing = response.parse()
+ assert_matches_type(ScheduledIndexingDeleteResponse, scheduled_indexing, path=["response"])
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ def test_streaming_response_delete(self, client: Gradient) -> None:
+ with client.agents.evaluation_metrics.scheduled_indexing.with_streaming_response.delete(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ scheduled_indexing = response.parse()
+ assert_matches_type(ScheduledIndexingDeleteResponse, scheduled_indexing, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ def test_path_params_delete(self, client: Gradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"):
+ client.agents.evaluation_metrics.scheduled_indexing.with_raw_response.delete(
+ "",
+ )
+
+
+class TestAsyncScheduledIndexing:
+ parametrize = pytest.mark.parametrize(
+ "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+ )
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ async def test_method_create(self, async_client: AsyncGradient) -> None:
+ scheduled_indexing = await async_client.agents.evaluation_metrics.scheduled_indexing.create()
+ assert_matches_type(ScheduledIndexingCreateResponse, scheduled_indexing, path=["response"])
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ async def test_method_create_with_all_params(self, async_client: AsyncGradient) -> None:
+ scheduled_indexing = await async_client.agents.evaluation_metrics.scheduled_indexing.create(
+ days=[123],
+ knowledge_base_uuid="123e4567-e89b-12d3-a456-426614174000",
+ time="example string",
+ )
+ assert_matches_type(ScheduledIndexingCreateResponse, scheduled_indexing, path=["response"])
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ async def test_raw_response_create(self, async_client: AsyncGradient) -> None:
+ response = await async_client.agents.evaluation_metrics.scheduled_indexing.with_raw_response.create()
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ scheduled_indexing = await response.parse()
+ assert_matches_type(ScheduledIndexingCreateResponse, scheduled_indexing, path=["response"])
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ async def test_streaming_response_create(self, async_client: AsyncGradient) -> None:
+ async with (
+ async_client.agents.evaluation_metrics.scheduled_indexing.with_streaming_response.create()
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ scheduled_indexing = await response.parse()
+ assert_matches_type(ScheduledIndexingCreateResponse, scheduled_indexing, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ async def test_method_retrieve(self, async_client: AsyncGradient) -> None:
+ scheduled_indexing = await async_client.agents.evaluation_metrics.scheduled_indexing.retrieve(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ )
+ assert_matches_type(ScheduledIndexingRetrieveResponse, scheduled_indexing, path=["response"])
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ async def test_raw_response_retrieve(self, async_client: AsyncGradient) -> None:
+ response = await async_client.agents.evaluation_metrics.scheduled_indexing.with_raw_response.retrieve(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ scheduled_indexing = await response.parse()
+ assert_matches_type(ScheduledIndexingRetrieveResponse, scheduled_indexing, path=["response"])
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ async def test_streaming_response_retrieve(self, async_client: AsyncGradient) -> None:
+ async with async_client.agents.evaluation_metrics.scheduled_indexing.with_streaming_response.retrieve(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ scheduled_indexing = await response.parse()
+ assert_matches_type(ScheduledIndexingRetrieveResponse, scheduled_indexing, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ async def test_path_params_retrieve(self, async_client: AsyncGradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `knowledge_base_uuid` but received ''"):
+ await async_client.agents.evaluation_metrics.scheduled_indexing.with_raw_response.retrieve(
+ "",
+ )
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ async def test_method_delete(self, async_client: AsyncGradient) -> None:
+ scheduled_indexing = await async_client.agents.evaluation_metrics.scheduled_indexing.delete(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ )
+ assert_matches_type(ScheduledIndexingDeleteResponse, scheduled_indexing, path=["response"])
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ async def test_raw_response_delete(self, async_client: AsyncGradient) -> None:
+ response = await async_client.agents.evaluation_metrics.scheduled_indexing.with_raw_response.delete(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ scheduled_indexing = await response.parse()
+ assert_matches_type(ScheduledIndexingDeleteResponse, scheduled_indexing, path=["response"])
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ async def test_streaming_response_delete(self, async_client: AsyncGradient) -> None:
+ async with async_client.agents.evaluation_metrics.scheduled_indexing.with_streaming_response.delete(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ scheduled_indexing = await response.parse()
+ assert_matches_type(ScheduledIndexingDeleteResponse, scheduled_indexing, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ async def test_path_params_delete(self, async_client: AsyncGradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `uuid` but received ''"):
+ await async_client.agents.evaluation_metrics.scheduled_indexing.with_raw_response.delete(
+ "",
+ )
diff --git a/tests/api_resources/knowledge_bases/test_data_sources.py b/tests/api_resources/knowledge_bases/test_data_sources.py
index 4214f880..ca721d93 100644
--- a/tests/api_resources/knowledge_bases/test_data_sources.py
+++ b/tests/api_resources/knowledge_bases/test_data_sources.py
@@ -52,6 +52,7 @@ def test_method_create_with_all_params(self, client: Gradient) -> None:
"base_url": "example string",
"crawling_option": "UNKNOWN",
"embed_media": True,
+ "exclude_tags": ["example string"],
},
)
assert_matches_type(DataSourceCreateResponse, data_source, path=["response"])
@@ -273,6 +274,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncGradient)
"base_url": "example string",
"crawling_option": "UNKNOWN",
"embed_media": True,
+ "exclude_tags": ["example string"],
},
)
assert_matches_type(DataSourceCreateResponse, data_source, path=["response"])
diff --git a/tests/api_resources/knowledge_bases/test_indexing_jobs.py b/tests/api_resources/knowledge_bases/test_indexing_jobs.py
index 88c551c8..231aceff 100644
--- a/tests/api_resources/knowledge_bases/test_indexing_jobs.py
+++ b/tests/api_resources/knowledge_bases/test_indexing_jobs.py
@@ -15,6 +15,7 @@
IndexingJobCreateResponse,
IndexingJobRetrieveResponse,
IndexingJobUpdateCancelResponse,
+ IndexingJobRetrieveSignedURLResponse,
IndexingJobRetrieveDataSourcesResponse,
)
@@ -182,6 +183,48 @@ def test_path_params_retrieve_data_sources(self, client: Gradient) -> None:
"",
)
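+    # retrieve_signed_url returns a signed URL for the given indexing job.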
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ def test_method_retrieve_signed_url(self, client: Gradient) -> None:
+ indexing_job = client.knowledge_bases.indexing_jobs.retrieve_signed_url(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ )
+ assert_matches_type(IndexingJobRetrieveSignedURLResponse, indexing_job, path=["response"])
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ def test_raw_response_retrieve_signed_url(self, client: Gradient) -> None:
+ response = client.knowledge_bases.indexing_jobs.with_raw_response.retrieve_signed_url(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ indexing_job = response.parse()
+ assert_matches_type(IndexingJobRetrieveSignedURLResponse, indexing_job, path=["response"])
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ def test_streaming_response_retrieve_signed_url(self, client: Gradient) -> None:
+ with client.knowledge_bases.indexing_jobs.with_streaming_response.retrieve_signed_url(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ indexing_job = response.parse()
+ assert_matches_type(IndexingJobRetrieveSignedURLResponse, indexing_job, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ def test_path_params_retrieve_signed_url(self, client: Gradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `indexing_job_uuid` but received ''"):
+ client.knowledge_bases.indexing_jobs.with_raw_response.retrieve_signed_url(
+ "",
+ )
+
@pytest.mark.skip(reason="Prism tests are disabled")
@parametrize
def test_method_update_cancel(self, client: Gradient) -> None:
@@ -516,6 +559,48 @@ async def test_path_params_retrieve_data_sources(self, async_client: AsyncGradie
"",
)
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ async def test_method_retrieve_signed_url(self, async_client: AsyncGradient) -> None:
+ indexing_job = await async_client.knowledge_bases.indexing_jobs.retrieve_signed_url(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ )
+ assert_matches_type(IndexingJobRetrieveSignedURLResponse, indexing_job, path=["response"])
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ async def test_raw_response_retrieve_signed_url(self, async_client: AsyncGradient) -> None:
+ response = await async_client.knowledge_bases.indexing_jobs.with_raw_response.retrieve_signed_url(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ indexing_job = await response.parse()
+ assert_matches_type(IndexingJobRetrieveSignedURLResponse, indexing_job, path=["response"])
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ async def test_streaming_response_retrieve_signed_url(self, async_client: AsyncGradient) -> None:
+ async with async_client.knowledge_bases.indexing_jobs.with_streaming_response.retrieve_signed_url(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ indexing_job = await response.parse()
+ assert_matches_type(IndexingJobRetrieveSignedURLResponse, indexing_job, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ async def test_path_params_retrieve_signed_url(self, async_client: AsyncGradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `indexing_job_uuid` but received ''"):
+ await async_client.knowledge_bases.indexing_jobs.with_raw_response.retrieve_signed_url(
+ "",
+ )
+
@pytest.mark.skip(reason="Prism tests are disabled")
@parametrize
async def test_method_update_cancel(self, async_client: AsyncGradient) -> None:
diff --git a/tests/api_resources/nfs/__init__.py b/tests/api_resources/nfs/__init__.py
new file mode 100644
index 00000000..fd8019a9
--- /dev/null
+++ b/tests/api_resources/nfs/__init__.py
@@ -0,0 +1 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
diff --git a/tests/api_resources/nfs/test_snapshots.py b/tests/api_resources/nfs/test_snapshots.py
new file mode 100644
index 00000000..e17265f3
--- /dev/null
+++ b/tests/api_resources/nfs/test_snapshots.py
@@ -0,0 +1,297 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import os
+from typing import Any, cast
+
+import pytest
+
+from gradient import Gradient, AsyncGradient
+from tests.utils import assert_matches_type
+from gradient.types.nfs import (
+ SnapshotListResponse,
+ SnapshotRetrieveResponse,
+)
+
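+# Default to a local Prism mock server; Prism listens on port 4010 unless overridden.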
+base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
+
+
+class TestSnapshots:
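+    # "loose" and "strict" run the same tests with _strict_response_validation off and on.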
+ parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ def test_method_retrieve(self, client: Gradient) -> None:
+ snapshot = client.nfs.snapshots.retrieve(
+ nfs_snapshot_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+ region="region",
+ )
+ assert_matches_type(SnapshotRetrieveResponse, snapshot, path=["response"])
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ def test_raw_response_retrieve(self, client: Gradient) -> None:
+ response = client.nfs.snapshots.with_raw_response.retrieve(
+ nfs_snapshot_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+ region="region",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ snapshot = response.parse()
+ assert_matches_type(SnapshotRetrieveResponse, snapshot, path=["response"])
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ def test_streaming_response_retrieve(self, client: Gradient) -> None:
+ with client.nfs.snapshots.with_streaming_response.retrieve(
+ nfs_snapshot_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+ region="region",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ snapshot = response.parse()
+ assert_matches_type(SnapshotRetrieveResponse, snapshot, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ def test_path_params_retrieve(self, client: Gradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `nfs_snapshot_id` but received ''"):
+ client.nfs.snapshots.with_raw_response.retrieve(
+ nfs_snapshot_id="",
+ region="region",
+ )
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ def test_method_list(self, client: Gradient) -> None:
+ snapshot = client.nfs.snapshots.list(
+ region="region",
+ )
+ assert_matches_type(SnapshotListResponse, snapshot, path=["response"])
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ def test_method_list_with_all_params(self, client: Gradient) -> None:
+ snapshot = client.nfs.snapshots.list(
+ region="region",
+ share_id="share_id",
+ )
+ assert_matches_type(SnapshotListResponse, snapshot, path=["response"])
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ def test_raw_response_list(self, client: Gradient) -> None:
+ response = client.nfs.snapshots.with_raw_response.list(
+ region="region",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ snapshot = response.parse()
+ assert_matches_type(SnapshotListResponse, snapshot, path=["response"])
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ def test_streaming_response_list(self, client: Gradient) -> None:
+ with client.nfs.snapshots.with_streaming_response.list(
+ region="region",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ snapshot = response.parse()
+ assert_matches_type(SnapshotListResponse, snapshot, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ def test_method_delete(self, client: Gradient) -> None:
+ snapshot = client.nfs.snapshots.delete(
+ nfs_snapshot_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+ region="region",
+ )
+ assert snapshot is None
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ def test_raw_response_delete(self, client: Gradient) -> None:
+ response = client.nfs.snapshots.with_raw_response.delete(
+ nfs_snapshot_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+ region="region",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ snapshot = response.parse()
+ assert snapshot is None
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ def test_streaming_response_delete(self, client: Gradient) -> None:
+ with client.nfs.snapshots.with_streaming_response.delete(
+ nfs_snapshot_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+ region="region",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ snapshot = response.parse()
+ assert snapshot is None
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ def test_path_params_delete(self, client: Gradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `nfs_snapshot_id` but received ''"):
+ client.nfs.snapshots.with_raw_response.delete(
+ nfs_snapshot_id="",
+ region="region",
+ )
+
+
+class TestAsyncSnapshots:
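+    # The async suite adds an aiohttp-backed client variant on top of loose/strict.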
+ parametrize = pytest.mark.parametrize(
+ "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+ )
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ async def test_method_retrieve(self, async_client: AsyncGradient) -> None:
+ snapshot = await async_client.nfs.snapshots.retrieve(
+ nfs_snapshot_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+ region="region",
+ )
+ assert_matches_type(SnapshotRetrieveResponse, snapshot, path=["response"])
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ async def test_raw_response_retrieve(self, async_client: AsyncGradient) -> None:
+ response = await async_client.nfs.snapshots.with_raw_response.retrieve(
+ nfs_snapshot_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+ region="region",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ snapshot = await response.parse()
+ assert_matches_type(SnapshotRetrieveResponse, snapshot, path=["response"])
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ async def test_streaming_response_retrieve(self, async_client: AsyncGradient) -> None:
+ async with async_client.nfs.snapshots.with_streaming_response.retrieve(
+ nfs_snapshot_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+ region="region",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ snapshot = await response.parse()
+ assert_matches_type(SnapshotRetrieveResponse, snapshot, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ async def test_path_params_retrieve(self, async_client: AsyncGradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `nfs_snapshot_id` but received ''"):
+ await async_client.nfs.snapshots.with_raw_response.retrieve(
+ nfs_snapshot_id="",
+ region="region",
+ )
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ async def test_method_list(self, async_client: AsyncGradient) -> None:
+ snapshot = await async_client.nfs.snapshots.list(
+ region="region",
+ )
+ assert_matches_type(SnapshotListResponse, snapshot, path=["response"])
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ async def test_method_list_with_all_params(self, async_client: AsyncGradient) -> None:
+ snapshot = await async_client.nfs.snapshots.list(
+ region="region",
+ share_id="share_id",
+ )
+ assert_matches_type(SnapshotListResponse, snapshot, path=["response"])
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ async def test_raw_response_list(self, async_client: AsyncGradient) -> None:
+ response = await async_client.nfs.snapshots.with_raw_response.list(
+ region="region",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ snapshot = await response.parse()
+ assert_matches_type(SnapshotListResponse, snapshot, path=["response"])
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ async def test_streaming_response_list(self, async_client: AsyncGradient) -> None:
+ async with async_client.nfs.snapshots.with_streaming_response.list(
+ region="region",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ snapshot = await response.parse()
+ assert_matches_type(SnapshotListResponse, snapshot, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ async def test_method_delete(self, async_client: AsyncGradient) -> None:
+ snapshot = await async_client.nfs.snapshots.delete(
+ nfs_snapshot_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+ region="region",
+ )
+ assert snapshot is None
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ async def test_raw_response_delete(self, async_client: AsyncGradient) -> None:
+ response = await async_client.nfs.snapshots.with_raw_response.delete(
+ nfs_snapshot_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+ region="region",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ snapshot = await response.parse()
+ assert snapshot is None
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ async def test_streaming_response_delete(self, async_client: AsyncGradient) -> None:
+ async with async_client.nfs.snapshots.with_streaming_response.delete(
+ nfs_snapshot_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+ region="region",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ snapshot = await response.parse()
+ assert snapshot is None
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ async def test_path_params_delete(self, async_client: AsyncGradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `nfs_snapshot_id` but received ''"):
+ await async_client.nfs.snapshots.with_raw_response.delete(
+ nfs_snapshot_id="",
+ region="region",
+ )
diff --git a/tests/api_resources/test_agents.py b/tests/api_resources/test_agents.py
index 1ba3e093..c9e59e32 100644
--- a/tests/api_resources/test_agents.py
+++ b/tests/api_resources/test_agents.py
@@ -40,12 +40,14 @@ def test_method_create_with_all_params(self, client: Gradient) -> None:
description='"My Agent Description"',
instruction='"You are an agent who thinks deeply about the world"',
knowledge_base_uuid=["example string"],
+ model_provider_key_uuid='"12345678-1234-1234-1234-123456789012"',
model_uuid='"12345678-1234-1234-1234-123456789012"',
name='"My Agent"',
openai_key_uuid='"12345678-1234-1234-1234-123456789012"',
project_id='"12345678-1234-1234-1234-123456789012"',
region='"tor1"',
tags=["example string"],
+ workspace_uuid="123e4567-e89b-12d3-a456-426614174000",
)
assert_matches_type(AgentCreateResponse, agent, path=["response"])
@@ -127,12 +129,14 @@ def test_method_update_with_all_params(self, client: Gradient) -> None:
agent = client.agents.update(
path_uuid='"123e4567-e89b-12d3-a456-426614174000"',
agent_log_insights_enabled=True,
+ allowed_domains=["example string"],
anthropic_key_uuid='"12345678-1234-1234-1234-123456789012"',
conversation_logs_enabled=True,
description='"My Agent Description"',
instruction='"You are an agent who thinks deeply about the world"',
k=5,
max_tokens=100,
+ model_provider_key_uuid='"12345678-1234-1234-1234-123456789012"',
model_uuid='"12345678-1234-1234-1234-123456789012"',
name='"My New Agent Name"',
openai_key_uuid='"12345678-1234-1234-1234-123456789012"',
@@ -483,12 +487,14 @@ async def test_method_create_with_all_params(self, async_client: AsyncGradient)
description='"My Agent Description"',
instruction='"You are an agent who thinks deeply about the world"',
knowledge_base_uuid=["example string"],
+ model_provider_key_uuid='"12345678-1234-1234-1234-123456789012"',
model_uuid='"12345678-1234-1234-1234-123456789012"',
name='"My Agent"',
openai_key_uuid='"12345678-1234-1234-1234-123456789012"',
project_id='"12345678-1234-1234-1234-123456789012"',
region='"tor1"',
tags=["example string"],
+ workspace_uuid="123e4567-e89b-12d3-a456-426614174000",
)
assert_matches_type(AgentCreateResponse, agent, path=["response"])
@@ -570,12 +576,14 @@ async def test_method_update_with_all_params(self, async_client: AsyncGradient)
agent = await async_client.agents.update(
path_uuid='"123e4567-e89b-12d3-a456-426614174000"',
agent_log_insights_enabled=True,
+ allowed_domains=["example string"],
anthropic_key_uuid='"12345678-1234-1234-1234-123456789012"',
conversation_logs_enabled=True,
description='"My Agent Description"',
instruction='"You are an agent who thinks deeply about the world"',
k=5,
max_tokens=100,
+ model_provider_key_uuid='"12345678-1234-1234-1234-123456789012"',
model_uuid='"12345678-1234-1234-1234-123456789012"',
name='"My New Agent Name"',
openai_key_uuid='"12345678-1234-1234-1234-123456789012"',
diff --git a/tests/api_resources/test_knowledge_bases.py b/tests/api_resources/test_knowledge_bases.py
index a42277e4..632951b4 100644
--- a/tests/api_resources/test_knowledge_bases.py
+++ b/tests/api_resources/test_knowledge_bases.py
@@ -15,6 +15,7 @@
KnowledgeBaseDeleteResponse,
KnowledgeBaseUpdateResponse,
KnowledgeBaseRetrieveResponse,
+ KnowledgeBaseListIndexingJobsResponse,
)
base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
@@ -54,6 +55,10 @@ def test_method_create_with_all_params(self, client: Gradient) -> None:
"size_in_bytes": "12345",
"stored_object_key": "example string",
},
+ "google_drive_data_source": {
+ "folder_id": "123e4567-e89b-12d3-a456-426614174000",
+ "refresh_token": "example string",
+ },
"item_path": "example string",
"spaces_data_source": {
"bucket_name": "example name",
@@ -64,6 +69,7 @@ def test_method_create_with_all_params(self, client: Gradient) -> None:
"base_url": "example string",
"crawling_option": "UNKNOWN",
"embed_media": True,
+ "exclude_tags": ["example string"],
},
}
],
@@ -371,6 +377,48 @@ def test_path_params_wait_for_database(self, client: Gradient) -> None:
"",
)
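+    # list_indexing_jobs returns the indexing jobs recorded for a single knowledge base.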
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ def test_method_list_indexing_jobs(self, client: Gradient) -> None:
+ knowledge_base = client.knowledge_bases.list_indexing_jobs(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ )
+ assert_matches_type(KnowledgeBaseListIndexingJobsResponse, knowledge_base, path=["response"])
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ def test_raw_response_list_indexing_jobs(self, client: Gradient) -> None:
+ response = client.knowledge_bases.with_raw_response.list_indexing_jobs(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ knowledge_base = response.parse()
+ assert_matches_type(KnowledgeBaseListIndexingJobsResponse, knowledge_base, path=["response"])
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ def test_streaming_response_list_indexing_jobs(self, client: Gradient) -> None:
+ with client.knowledge_bases.with_streaming_response.list_indexing_jobs(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ knowledge_base = response.parse()
+ assert_matches_type(KnowledgeBaseListIndexingJobsResponse, knowledge_base, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ def test_path_params_list_indexing_jobs(self, client: Gradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `knowledge_base_uuid` but received ''"):
+ client.knowledge_bases.with_raw_response.list_indexing_jobs(
+ "",
+ )
+
class TestAsyncKnowledgeBases:
parametrize = pytest.mark.parametrize(
@@ -408,6 +456,10 @@ async def test_method_create_with_all_params(self, async_client: AsyncGradient)
"size_in_bytes": "12345",
"stored_object_key": "example string",
},
+ "google_drive_data_source": {
+ "folder_id": "123e4567-e89b-12d3-a456-426614174000",
+ "refresh_token": "example string",
+ },
"item_path": "example string",
"spaces_data_source": {
"bucket_name": "example name",
@@ -418,6 +470,7 @@ async def test_method_create_with_all_params(self, async_client: AsyncGradient)
"base_url": "example string",
"crawling_option": "UNKNOWN",
"embed_media": True,
+ "exclude_tags": ["example string"],
},
}
],
@@ -724,3 +777,45 @@ async def test_path_params_wait_for_database(self, async_client: AsyncGradient)
await async_client.knowledge_bases.wait_for_database(
"",
)
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ async def test_method_list_indexing_jobs(self, async_client: AsyncGradient) -> None:
+ knowledge_base = await async_client.knowledge_bases.list_indexing_jobs(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ )
+ assert_matches_type(KnowledgeBaseListIndexingJobsResponse, knowledge_base, path=["response"])
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ async def test_raw_response_list_indexing_jobs(self, async_client: AsyncGradient) -> None:
+ response = await async_client.knowledge_bases.with_raw_response.list_indexing_jobs(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ knowledge_base = await response.parse()
+ assert_matches_type(KnowledgeBaseListIndexingJobsResponse, knowledge_base, path=["response"])
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ async def test_streaming_response_list_indexing_jobs(self, async_client: AsyncGradient) -> None:
+ async with async_client.knowledge_bases.with_streaming_response.list_indexing_jobs(
+ '"123e4567-e89b-12d3-a456-426614174000"',
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ knowledge_base = await response.parse()
+ assert_matches_type(KnowledgeBaseListIndexingJobsResponse, knowledge_base, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ async def test_path_params_list_indexing_jobs(self, async_client: AsyncGradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `knowledge_base_uuid` but received ''"):
+ await async_client.knowledge_bases.with_raw_response.list_indexing_jobs(
+ "",
+ )
diff --git a/tests/api_resources/test_nfs.py b/tests/api_resources/test_nfs.py
new file mode 100644
index 00000000..f2749330
--- /dev/null
+++ b/tests/api_resources/test_nfs.py
@@ -0,0 +1,611 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+import os
+from typing import Any, cast
+
+import pytest
+
+from gradient import Gradient, AsyncGradient
+from tests.utils import assert_matches_type
+from gradient.types import (
+ NfListResponse,
+ NfCreateResponse,
+ NfRetrieveResponse,
+ NfInitiateActionResponse,
+)
+
+base_url = os.environ.get("TEST_API_BASE_URL", "http://127.0.0.1:4010")
+
+
+class TestNfs:
+ parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ def test_method_create(self, client: Gradient) -> None:
+ nf = client.nfs.create(
+ name="sammy-share-drive",
+ region="atl1",
+ size_gib=1024,
+ vpc_ids=["796c6fe3-2a1d-4da2-9f3e-38239827dc91"],
+ )
+ assert_matches_type(NfCreateResponse, nf, path=["response"])
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ def test_raw_response_create(self, client: Gradient) -> None:
+ response = client.nfs.with_raw_response.create(
+ name="sammy-share-drive",
+ region="atl1",
+ size_gib=1024,
+ vpc_ids=["796c6fe3-2a1d-4da2-9f3e-38239827dc91"],
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ nf = response.parse()
+ assert_matches_type(NfCreateResponse, nf, path=["response"])
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ def test_streaming_response_create(self, client: Gradient) -> None:
+ with client.nfs.with_streaming_response.create(
+ name="sammy-share-drive",
+ region="atl1",
+ size_gib=1024,
+ vpc_ids=["796c6fe3-2a1d-4da2-9f3e-38239827dc91"],
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ nf = response.parse()
+ assert_matches_type(NfCreateResponse, nf, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ def test_method_retrieve(self, client: Gradient) -> None:
+ nf = client.nfs.retrieve(
+ nfs_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+ region="region",
+ )
+ assert_matches_type(NfRetrieveResponse, nf, path=["response"])
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ def test_raw_response_retrieve(self, client: Gradient) -> None:
+ response = client.nfs.with_raw_response.retrieve(
+ nfs_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+ region="region",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ nf = response.parse()
+ assert_matches_type(NfRetrieveResponse, nf, path=["response"])
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ def test_streaming_response_retrieve(self, client: Gradient) -> None:
+ with client.nfs.with_streaming_response.retrieve(
+ nfs_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+ region="region",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ nf = response.parse()
+ assert_matches_type(NfRetrieveResponse, nf, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ def test_path_params_retrieve(self, client: Gradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `nfs_id` but received ''"):
+ client.nfs.with_raw_response.retrieve(
+ nfs_id="",
+ region="region",
+ )
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ def test_method_list(self, client: Gradient) -> None:
+ nf = client.nfs.list(
+ region="region",
+ )
+ assert_matches_type(NfListResponse, nf, path=["response"])
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ def test_raw_response_list(self, client: Gradient) -> None:
+ response = client.nfs.with_raw_response.list(
+ region="region",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ nf = response.parse()
+ assert_matches_type(NfListResponse, nf, path=["response"])
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ def test_streaming_response_list(self, client: Gradient) -> None:
+ with client.nfs.with_streaming_response.list(
+ region="region",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ nf = response.parse()
+ assert_matches_type(NfListResponse, nf, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ def test_method_delete(self, client: Gradient) -> None:
+ nf = client.nfs.delete(
+ nfs_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+ region="region",
+ )
+ assert nf is None
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ def test_raw_response_delete(self, client: Gradient) -> None:
+ response = client.nfs.with_raw_response.delete(
+ nfs_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+ region="region",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ nf = response.parse()
+ assert nf is None
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ def test_streaming_response_delete(self, client: Gradient) -> None:
+ with client.nfs.with_streaming_response.delete(
+ nfs_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+ region="region",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ nf = response.parse()
+ assert nf is None
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ def test_path_params_delete(self, client: Gradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `nfs_id` but received ''"):
+ client.nfs.with_raw_response.delete(
+ nfs_id="",
+ region="region",
+ )
+
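+    # initiate_action is overloaded on the shape of the params payload; this variant performs a resize (size_gib).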
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ def test_method_initiate_action_overload_1(self, client: Gradient) -> None:
+ nf = client.nfs.initiate_action(
+ nfs_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+ region="atl1",
+ type="resize",
+ )
+ assert_matches_type(NfInitiateActionResponse, nf, path=["response"])
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ def test_method_initiate_action_with_all_params_overload_1(self, client: Gradient) -> None:
+ nf = client.nfs.initiate_action(
+ nfs_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+ region="atl1",
+ type="resize",
+ params={"size_gib": 2048},
+ )
+ assert_matches_type(NfInitiateActionResponse, nf, path=["response"])
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ def test_raw_response_initiate_action_overload_1(self, client: Gradient) -> None:
+ response = client.nfs.with_raw_response.initiate_action(
+ nfs_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+ region="atl1",
+ type="resize",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ nf = response.parse()
+ assert_matches_type(NfInitiateActionResponse, nf, path=["response"])
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ def test_streaming_response_initiate_action_overload_1(self, client: Gradient) -> None:
+ with client.nfs.with_streaming_response.initiate_action(
+ nfs_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+ region="atl1",
+ type="resize",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ nf = response.parse()
+ assert_matches_type(NfInitiateActionResponse, nf, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ def test_path_params_initiate_action_overload_1(self, client: Gradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `nfs_id` but received ''"):
+ client.nfs.with_raw_response.initiate_action(
+ nfs_id="",
+ region="atl1",
+ type="resize",
+ )
+
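+    # The second variant's params carry a name instead of a size.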
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ def test_method_initiate_action_overload_2(self, client: Gradient) -> None:
+ nf = client.nfs.initiate_action(
+ nfs_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+ region="atl1",
+ type="resize",
+ )
+ assert_matches_type(NfInitiateActionResponse, nf, path=["response"])
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ def test_method_initiate_action_with_all_params_overload_2(self, client: Gradient) -> None:
+ nf = client.nfs.initiate_action(
+ nfs_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+ region="atl1",
+ type="resize",
+ params={"name": "daily-backup"},
+ )
+ assert_matches_type(NfInitiateActionResponse, nf, path=["response"])
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ def test_raw_response_initiate_action_overload_2(self, client: Gradient) -> None:
+ response = client.nfs.with_raw_response.initiate_action(
+ nfs_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+ region="atl1",
+ type="resize",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ nf = response.parse()
+ assert_matches_type(NfInitiateActionResponse, nf, path=["response"])
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ def test_streaming_response_initiate_action_overload_2(self, client: Gradient) -> None:
+ with client.nfs.with_streaming_response.initiate_action(
+ nfs_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+ region="atl1",
+ type="resize",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ nf = response.parse()
+ assert_matches_type(NfInitiateActionResponse, nf, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ def test_path_params_initiate_action_overload_2(self, client: Gradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `nfs_id` but received ''"):
+ client.nfs.with_raw_response.initiate_action(
+ nfs_id="",
+ region="atl1",
+ type="resize",
+ )
+
+
+class TestAsyncNfs:
+ parametrize = pytest.mark.parametrize(
+ "async_client", [False, True, {"http_client": "aiohttp"}], indirect=True, ids=["loose", "strict", "aiohttp"]
+ )
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ async def test_method_create(self, async_client: AsyncGradient) -> None:
+ nf = await async_client.nfs.create(
+ name="sammy-share-drive",
+ region="atl1",
+ size_gib=1024,
+ vpc_ids=["796c6fe3-2a1d-4da2-9f3e-38239827dc91"],
+ )
+ assert_matches_type(NfCreateResponse, nf, path=["response"])
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ async def test_raw_response_create(self, async_client: AsyncGradient) -> None:
+ response = await async_client.nfs.with_raw_response.create(
+ name="sammy-share-drive",
+ region="atl1",
+ size_gib=1024,
+ vpc_ids=["796c6fe3-2a1d-4da2-9f3e-38239827dc91"],
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ nf = await response.parse()
+ assert_matches_type(NfCreateResponse, nf, path=["response"])
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ async def test_streaming_response_create(self, async_client: AsyncGradient) -> None:
+ async with async_client.nfs.with_streaming_response.create(
+ name="sammy-share-drive",
+ region="atl1",
+ size_gib=1024,
+ vpc_ids=["796c6fe3-2a1d-4da2-9f3e-38239827dc91"],
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ nf = await response.parse()
+ assert_matches_type(NfCreateResponse, nf, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ async def test_method_retrieve(self, async_client: AsyncGradient) -> None:
+ nf = await async_client.nfs.retrieve(
+ nfs_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+ region="region",
+ )
+ assert_matches_type(NfRetrieveResponse, nf, path=["response"])
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ async def test_raw_response_retrieve(self, async_client: AsyncGradient) -> None:
+ response = await async_client.nfs.with_raw_response.retrieve(
+ nfs_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+ region="region",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ nf = await response.parse()
+ assert_matches_type(NfRetrieveResponse, nf, path=["response"])
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ async def test_streaming_response_retrieve(self, async_client: AsyncGradient) -> None:
+ async with async_client.nfs.with_streaming_response.retrieve(
+ nfs_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+ region="region",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ nf = await response.parse()
+ assert_matches_type(NfRetrieveResponse, nf, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ async def test_path_params_retrieve(self, async_client: AsyncGradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `nfs_id` but received ''"):
+ await async_client.nfs.with_raw_response.retrieve(
+ nfs_id="",
+ region="region",
+ )
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ async def test_method_list(self, async_client: AsyncGradient) -> None:
+ nf = await async_client.nfs.list(
+ region="region",
+ )
+ assert_matches_type(NfListResponse, nf, path=["response"])
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ async def test_raw_response_list(self, async_client: AsyncGradient) -> None:
+ response = await async_client.nfs.with_raw_response.list(
+ region="region",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ nf = await response.parse()
+ assert_matches_type(NfListResponse, nf, path=["response"])
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ async def test_streaming_response_list(self, async_client: AsyncGradient) -> None:
+ async with async_client.nfs.with_streaming_response.list(
+ region="region",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ nf = await response.parse()
+ assert_matches_type(NfListResponse, nf, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ async def test_method_delete(self, async_client: AsyncGradient) -> None:
+ nf = await async_client.nfs.delete(
+ nfs_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+ region="region",
+ )
+ assert nf is None
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ async def test_raw_response_delete(self, async_client: AsyncGradient) -> None:
+ response = await async_client.nfs.with_raw_response.delete(
+ nfs_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+ region="region",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ nf = await response.parse()
+ assert nf is None
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ async def test_streaming_response_delete(self, async_client: AsyncGradient) -> None:
+ async with async_client.nfs.with_streaming_response.delete(
+ nfs_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+ region="region",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ nf = await response.parse()
+ assert nf is None
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ async def test_path_params_delete(self, async_client: AsyncGradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `nfs_id` but received ''"):
+ await async_client.nfs.with_raw_response.delete(
+ nfs_id="",
+ region="region",
+ )
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ async def test_method_initiate_action_overload_1(self, async_client: AsyncGradient) -> None:
+ nf = await async_client.nfs.initiate_action(
+ nfs_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+ region="atl1",
+ type="resize",
+ )
+ assert_matches_type(NfInitiateActionResponse, nf, path=["response"])
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ async def test_method_initiate_action_with_all_params_overload_1(self, async_client: AsyncGradient) -> None:
+ nf = await async_client.nfs.initiate_action(
+ nfs_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+ region="atl1",
+ type="resize",
+ params={"size_gib": 2048},
+ )
+ assert_matches_type(NfInitiateActionResponse, nf, path=["response"])
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ async def test_raw_response_initiate_action_overload_1(self, async_client: AsyncGradient) -> None:
+ response = await async_client.nfs.with_raw_response.initiate_action(
+ nfs_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+ region="atl1",
+ type="resize",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ nf = await response.parse()
+ assert_matches_type(NfInitiateActionResponse, nf, path=["response"])
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ async def test_streaming_response_initiate_action_overload_1(self, async_client: AsyncGradient) -> None:
+ async with async_client.nfs.with_streaming_response.initiate_action(
+ nfs_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+ region="atl1",
+ type="resize",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ nf = await response.parse()
+ assert_matches_type(NfInitiateActionResponse, nf, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ async def test_path_params_initiate_action_overload_1(self, async_client: AsyncGradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `nfs_id` but received ''"):
+ await async_client.nfs.with_raw_response.initiate_action(
+ nfs_id="",
+ region="atl1",
+ type="resize",
+ )
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ async def test_method_initiate_action_overload_2(self, async_client: AsyncGradient) -> None:
+ nf = await async_client.nfs.initiate_action(
+ nfs_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+ region="atl1",
+ type="resize",
+ )
+ assert_matches_type(NfInitiateActionResponse, nf, path=["response"])
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ async def test_method_initiate_action_with_all_params_overload_2(self, async_client: AsyncGradient) -> None:
+ nf = await async_client.nfs.initiate_action(
+ nfs_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+ region="atl1",
+ type="resize",
+ params={"name": "daily-backup"},
+ )
+ assert_matches_type(NfInitiateActionResponse, nf, path=["response"])
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ async def test_raw_response_initiate_action_overload_2(self, async_client: AsyncGradient) -> None:
+ response = await async_client.nfs.with_raw_response.initiate_action(
+ nfs_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+ region="atl1",
+ type="resize",
+ )
+
+ assert response.is_closed is True
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+ nf = await response.parse()
+ assert_matches_type(NfInitiateActionResponse, nf, path=["response"])
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ async def test_streaming_response_initiate_action_overload_2(self, async_client: AsyncGradient) -> None:
+ async with async_client.nfs.with_streaming_response.initiate_action(
+ nfs_id="0a1b2c3d-4e5f-6a7b-8c9d-0e1f2a3b4c5d",
+ region="atl1",
+ type="resize",
+ ) as response:
+ assert not response.is_closed
+ assert response.http_request.headers.get("X-Stainless-Lang") == "python"
+
+ nf = await response.parse()
+ assert_matches_type(NfInitiateActionResponse, nf, path=["response"])
+
+ assert cast(Any, response.is_closed) is True
+
+ @pytest.mark.skip(reason="Prism tests are disabled")
+ @parametrize
+ async def test_path_params_initiate_action_overload_2(self, async_client: AsyncGradient) -> None:
+ with pytest.raises(ValueError, match=r"Expected a non-empty value for `nfs_id` but received ''"):
+ await async_client.nfs.with_raw_response.initiate_action(
+ nfs_id="",
+ region="atl1",
+ type="resize",
+ )
diff --git a/tests/test_client.py b/tests/test_client.py
index 846c0bb6..c2406d77 100644
--- a/tests/test_client.py
+++ b/tests/test_client.py
@@ -59,32 +59,30 @@ def _low_retry_timeout(*_args: Any, **_kwargs: Any) -> float:
def _get_open_connections(client: Gradient | AsyncGradient) -> int:
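+    # Peek at the transport's private connection pool and count the requests it still tracks.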
transport = client._client._transport
- assert isinstance(transport, httpx.HTTPTransport) or isinstance(transport, httpx.AsyncHTTPTransport)
+ assert isinstance(transport, httpx.HTTPTransport) or isinstance(
+ transport, httpx.AsyncHTTPTransport
+ )
pool = transport._pool
return len(pool._requests)
class TestGradient:
- client = Gradient(
- base_url=base_url,
- access_token=access_token,
- model_access_key=model_access_key,
- agent_access_key=agent_access_key,
- _strict_response_validation=True,
- )
-
@pytest.mark.respx(base_url=base_url)
- def test_raw_response(self, respx_mock: MockRouter) -> None:
- respx_mock.post("/foo").mock(return_value=httpx.Response(200, json={"foo": "bar"}))
+ def test_raw_response(self, respx_mock: MockRouter, client: Gradient) -> None:
+ respx_mock.post("/foo").mock(
+ return_value=httpx.Response(200, json={"foo": "bar"})
+ )
- response = self.client.post("/foo", cast_to=httpx.Response)
+ response = client.post("/foo", cast_to=httpx.Response)
assert response.status_code == 200
assert isinstance(response, httpx.Response)
assert response.json() == {"foo": "bar"}
@pytest.mark.respx(base_url=base_url)
- def test_raw_response_for_binary(self, respx_mock: MockRouter) -> None:
+ def test_raw_response_for_binary(
+ self, respx_mock: MockRouter, client: Gradient
+ ) -> None:
respx_mock.post("/foo").mock(
return_value=httpx.Response(
200,
@@ -93,42 +91,48 @@ def test_raw_response_for_binary(self, respx_mock: MockRouter) -> None:
)
)
- response = self.client.post("/foo", cast_to=httpx.Response)
+ response = client.post("/foo", cast_to=httpx.Response)
assert response.status_code == 200
assert isinstance(response, httpx.Response)
assert response.json() == {"foo": "bar"}
- def test_copy(self) -> None:
- copied = self.client.copy()
- assert id(copied) != id(self.client)
+ def test_copy(self, client: Gradient) -> None:
+ copied = client.copy()
+ assert id(copied) != id(client)
- copied = self.client.copy(access_token="another My Access Token")
+ copied = client.copy(access_token="another My Access Token")
assert copied.access_token == "another My Access Token"
- assert self.client.access_token == "My Access Token"
+ assert client.access_token == "My Access Token"
- copied = self.client.copy(model_access_key="another My Model Access Key")
+ copied = client.copy(model_access_key="another My Model Access Key")
assert copied.model_access_key == "another My Model Access Key"
- assert self.client.model_access_key == "My Model Access Key"
+ assert client.model_access_key == "My Model Access Key"
- copied = self.client.copy(agent_access_key="another My Agent Access Key")
+ copied = client.copy(agent_access_key="another My Agent Access Key")
assert copied.agent_access_key == "another My Agent Access Key"
- assert self.client.agent_access_key == "My Agent Access Key"
+ assert client.agent_access_key == "My Agent Access Key"
def test_copy_default_options(self) -> None:
+ client = Gradient(
+ base_url=base_url,
+ access_token=access_token,
+ model_access_key=model_access_key,
+ agent_access_key=agent_access_key,
+ )
# options that have a default are overridden correctly
- copied = self.client.copy(max_retries=7)
+ copied = client.copy(max_retries=7)
assert copied.max_retries == 7
- assert self.client.max_retries == 2
+ assert client.max_retries == 2
copied2 = copied.copy(max_retries=6)
assert copied2.max_retries == 6
assert copied.max_retries == 7
# timeout
- assert isinstance(self.client.timeout, httpx.Timeout)
- copied = self.client.copy(timeout=None)
+ assert isinstance(client.timeout, httpx.Timeout)
+ copied = client.copy(timeout=None)
assert copied.timeout is None
- assert isinstance(self.client.timeout, httpx.Timeout)
+ assert isinstance(client.timeout, httpx.Timeout)
def test_copy_default_headers(self) -> None:
client = Gradient(
@@ -168,6 +172,7 @@ def test_copy_default_headers(self) -> None:
match="`default_headers` and `set_default_headers` arguments are mutually exclusive",
):
client.copy(set_default_headers={}, default_headers={"X-Foo": "Bar"})
+ client.close()
def test_copy_default_query(self) -> None:
client = Gradient(
@@ -210,13 +215,15 @@ def test_copy_default_query(self) -> None:
):
client.copy(set_default_query={}, default_query={"foo": "Bar"})
- def test_copy_signature(self) -> None:
+ client.close()
+
+ def test_copy_signature(self, client: Gradient) -> None:
# ensure the same parameters that can be passed to the client are defined in the `.copy()` method
init_signature = inspect.signature(
# mypy doesn't like that we access the `__init__` property.
- self.client.__init__, # type: ignore[misc]
+ client.__init__, # type: ignore[misc]
)
- copy_signature = inspect.signature(self.client.copy)
+ copy_signature = inspect.signature(client.copy)
exclude_params = {"transport", "proxies", "_strict_response_validation"}
for name in init_signature.parameters.keys():
@@ -224,18 +231,26 @@ def test_copy_signature(self) -> None:
continue
copy_param = copy_signature.parameters.get(name)
- assert copy_param is not None, f"copy() signature is missing the {name} param"
+ assert (
+ copy_param is not None
+ ), f"copy() signature is missing the {name} param"
@pytest.mark.skipif(
sys.version_info >= (3, 12),
reason="fails because of a memory leak that started from 3.12",
)
def test_copy_build_request(self) -> None:
+ client = Gradient(
+ base_url=base_url,
+ access_token=access_token,
+ model_access_key=model_access_key,
+ agent_access_key=agent_access_key,
+ )
options = FinalRequestOptions(method="get", url="/foo")
def build_request(options: FinalRequestOptions) -> None:
- client = self.client.copy()
- client._build_request(options)
+ client_copy = client.copy()
+ client_copy._build_request(options)
# ensure that the machinery is warmed up before tracing starts.
build_request(options)
@@ -254,7 +269,9 @@ def build_request(options: FinalRequestOptions) -> None:
tracemalloc.stop()
- def add_leak(leaks: list[tracemalloc.StatisticDiff], diff: tracemalloc.StatisticDiff) -> None:
+ def add_leak(
+ leaks: list[tracemalloc.StatisticDiff], diff: tracemalloc.StatisticDiff
+ ) -> None:
if diff.count == 0:
# Avoid false positives by considering only leaks (i.e. allocations that persist).
return
@@ -292,12 +309,12 @@ def add_leak(leaks: list[tracemalloc.StatisticDiff], diff: tracemalloc.Statistic
print(frame) # noqa: T201
raise AssertionError()
- def test_request_timeout(self) -> None:
- request = self.client._build_request(FinalRequestOptions(method="get", url="/foo"))
+ def test_request_timeout(self, client: Gradient) -> None:
+ request = client._build_request(FinalRequestOptions(method="get", url="/foo"))
timeout = httpx.Timeout(**request.extensions["timeout"]) # type: ignore
assert timeout == DEFAULT_TIMEOUT
- request = self.client._build_request(
+ request = client._build_request(
FinalRequestOptions(method="get", url="/foo", timeout=httpx.Timeout(100.0))
)
timeout = httpx.Timeout(**request.extensions["timeout"]) # type: ignore
@@ -317,6 +334,8 @@ def test_client_timeout_option(self) -> None:
timeout = httpx.Timeout(**request.extensions["timeout"]) # type: ignore
assert timeout == httpx.Timeout(0)
+ client.close()
+
def test_http_client_timeout_option(self) -> None:
# custom timeout given to the httpx client should be used
with httpx.Client(timeout=None) as http_client:
@@ -329,10 +348,14 @@ def test_http_client_timeout_option(self) -> None:
http_client=http_client,
)
- request = client._build_request(FinalRequestOptions(method="get", url="/foo"))
+ request = client._build_request(
+ FinalRequestOptions(method="get", url="/foo")
+ )
timeout = httpx.Timeout(**request.extensions["timeout"]) # type: ignore
assert timeout == httpx.Timeout(None)
+ client.close()
+
# no timeout given to the httpx client should not use the httpx default
with httpx.Client() as http_client:
client = Gradient(
@@ -344,10 +367,14 @@ def test_http_client_timeout_option(self) -> None:
http_client=http_client,
)
- request = client._build_request(FinalRequestOptions(method="get", url="/foo"))
+ request = client._build_request(
+ FinalRequestOptions(method="get", url="/foo")
+ )
timeout = httpx.Timeout(**request.extensions["timeout"]) # type: ignore
assert timeout == DEFAULT_TIMEOUT
+ client.close()
+
# explicitly passing the default timeout currently results in it being ignored
with httpx.Client(timeout=HTTPX_DEFAULT_TIMEOUT) as http_client:
client = Gradient(
@@ -359,10 +386,14 @@ def test_http_client_timeout_option(self) -> None:
http_client=http_client,
)
- request = client._build_request(FinalRequestOptions(method="get", url="/foo"))
+ request = client._build_request(
+ FinalRequestOptions(method="get", url="/foo")
+ )
timeout = httpx.Timeout(**request.extensions["timeout"]) # type: ignore
assert timeout == DEFAULT_TIMEOUT # our default
+ client.close()
+
async def test_invalid_http_client(self) -> None:
with pytest.raises(TypeError, match="Invalid `http_client` arg"):
async with httpx.AsyncClient() as http_client:
@@ -376,7 +407,7 @@ async def test_invalid_http_client(self) -> None:
)
def test_default_headers_option(self) -> None:
- client = Gradient(
+ test_client = Gradient(
base_url=base_url,
access_token=access_token,
model_access_key=model_access_key,
@@ -384,11 +415,13 @@ def test_default_headers_option(self) -> None:
_strict_response_validation=True,
default_headers={"X-Foo": "bar"},
)
- request = client._build_request(FinalRequestOptions(method="get", url="/foo"))
+ request = test_client._build_request(
+ FinalRequestOptions(method="get", url="/foo")
+ )
assert request.headers.get("x-foo") == "bar"
assert request.headers.get("x-stainless-lang") == "python"
- client2 = Gradient(
+ test_client2 = Gradient(
base_url=base_url,
access_token=access_token,
model_access_key=model_access_key,
@@ -399,10 +432,15 @@ def test_default_headers_option(self) -> None:
"X-Stainless-Lang": "my-overriding-header",
},
)
- request = client2._build_request(FinalRequestOptions(method="get", url="/foo"))
+ request = test_client2._build_request(
+ FinalRequestOptions(method="get", url="/foo")
+ )
assert request.headers.get("x-foo") == "stainless"
assert request.headers.get("x-stainless-lang") == "my-overriding-header"
+ test_client.close()
+ test_client2.close()
+
def test_validate_headers(self) -> None:
client = Gradient(
base_url=base_url,
@@ -415,7 +453,11 @@ def test_validate_headers(self) -> None:
assert request.headers.get("Authorization") == f"Bearer {access_token}"
with update_env(
- **{"DIGITALOCEAN_ACCESS_TOKEN": Omit(), "MODEL_ACCESS_KEY": Omit(), "AGENT_ACCESS_KEY": Omit()}
+ **{
+ "DIGITALOCEAN_ACCESS_TOKEN": Omit(),
+ "MODEL_ACCESS_KEY": Omit(),
+ "AGENT_ACCESS_KEY": Omit(),
+ }
):
client2 = Gradient(
base_url=base_url,
@@ -432,7 +474,9 @@ def test_validate_headers(self) -> None:
client2._build_request(FinalRequestOptions(method="get", url="/foo"))
request2 = client2._build_request(
- FinalRequestOptions(method="get", url="/foo", headers={"Authorization": Omit()})
+ FinalRequestOptions(
+ method="get", url="/foo", headers={"Authorization": Omit()}
+ )
)
assert request2.headers.get("Authorization") is None
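`Omit()` is the sentinel these header tests rely on: unlike `None`, it means "drop the key entirely". A hypothetical re-implementation of the merge rule, just to show the pattern (this is not the SDK's code):

```python
# Hypothetical Omit-sentinel sketch: Omit removes a header, None would not.
from typing import Union

class Omit:
    def __bool__(self) -> bool:
        return False

def merge_headers(
    defaults: dict[str, str], overrides: dict[str, Union[str, Omit]]
) -> dict[str, str]:
    merged = dict(defaults)
    for key, value in overrides.items():
        if isinstance(value, Omit):
            merged.pop(key, None)  # remove the header entirely
        else:
            merged[key] = value
    return merged

headers = merge_headers(
    {"Authorization": "Bearer token", "X-Foo": "bar"},
    {"Authorization": Omit()},
)
assert "Authorization" not in headers and headers["X-Foo"] == "bar"
```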
@@ -459,8 +503,10 @@ def test_default_query_option(self) -> None:
url = httpx.URL(request.url)
assert dict(url.params) == {"foo": "baz", "query_param": "overridden"}
- def test_request_extra_json(self) -> None:
- request = self.client._build_request(
+ client.close()
+
+ def test_request_extra_json(self, client: Gradient) -> None:
+ request = client._build_request(
FinalRequestOptions(
method="post",
url="/foo",
@@ -471,7 +517,7 @@ def test_request_extra_json(self) -> None:
data = json.loads(request.content.decode("utf-8"))
assert data == {"foo": "bar", "baz": False}
- request = self.client._build_request(
+ request = client._build_request(
FinalRequestOptions(
method="post",
url="/foo",
@@ -482,7 +528,7 @@ def test_request_extra_json(self) -> None:
assert data == {"baz": False}
# `extra_json` takes priority over `json_data` when keys clash
- request = self.client._build_request(
+ request = client._build_request(
FinalRequestOptions(
method="post",
url="/foo",
@@ -493,8 +539,8 @@ def test_request_extra_json(self) -> None:
data = json.loads(request.content.decode("utf-8"))
assert data == {"foo": "bar", "baz": None}
- def test_request_extra_headers(self) -> None:
- request = self.client._build_request(
+ def test_request_extra_headers(self, client: Gradient) -> None:
+ request = client._build_request(
FinalRequestOptions(
method="post",
url="/foo",
@@ -504,7 +550,7 @@ def test_request_extra_headers(self) -> None:
assert request.headers.get("X-Foo") == "Foo"
# `extra_headers` takes priority over `default_headers` when keys clash
- request = self.client.with_options(default_headers={"X-Bar": "true"})._build_request(
+ request = client.with_options(default_headers={"X-Bar": "true"})._build_request(
FinalRequestOptions(
method="post",
url="/foo",
@@ -515,8 +561,8 @@ def test_request_extra_headers(self) -> None:
)
assert request.headers.get("X-Bar") == "false"
- def test_request_extra_query(self) -> None:
- request = self.client._build_request(
+ def test_request_extra_query(self, client: Gradient) -> None:
+ request = client._build_request(
FinalRequestOptions(
method="post",
url="/foo",
@@ -529,7 +575,7 @@ def test_request_extra_query(self) -> None:
assert params == {"my_query_param": "Foo"}
# if both `query` and `extra_query` are given, they are merged
- request = self.client._build_request(
+ request = client._build_request(
FinalRequestOptions(
method="post",
url="/foo",
@@ -543,7 +589,7 @@ def test_request_extra_query(self) -> None:
assert params == {"bar": "1", "foo": "2"}
# `extra_query` takes priority over `query` when keys clash
- request = self.client._build_request(
+ request = client._build_request(
FinalRequestOptions(
method="post",
url="/foo",
@@ -561,7 +607,9 @@ def test_multipart_repeating_array(self, client: Gradient) -> None:
FinalRequestOptions.construct(
method="post",
url="/foo",
- headers={"Content-Type": "multipart/form-data; boundary=6b7ba517decee4a450543ea6ae821c82"},
+ headers={
+ "Content-Type": "multipart/form-data; boundary=6b7ba517decee4a450543ea6ae821c82"
+ },
json_data={"array": ["foo", "bar"]},
files=[("foo.txt", b"hello world")],
)
@@ -586,21 +634,27 @@ def test_multipart_repeating_array(self, client: Gradient) -> None:
]
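The expected parts above encode one rule worth spelling out: when files are present, JSON data is flattened into multipart form fields, and a list value repeats its field name once per item. Plain httpx applies the same convention, as a quick sketch can confirm (hypothetical URL):

```python
# Plain-httpx sketch of repeating-array multipart encoding.
import httpx

request = httpx.Request(
    "POST",
    "https://example.com/foo",
    data={"array": ["foo", "bar"]},          # -> two `name="array"` parts
    files={"file": ("foo.txt", b"hello world")},
)
body = request.read().decode("utf-8", errors="replace")
assert body.count('name="array"') == 2
assert 'filename="foo.txt"' in body
```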
@pytest.mark.respx(base_url=base_url)
- def test_basic_union_response(self, respx_mock: MockRouter) -> None:
+ def test_basic_union_response(
+ self, respx_mock: MockRouter, client: Gradient
+ ) -> None:
class Model1(BaseModel):
name: str
class Model2(BaseModel):
foo: str
- respx_mock.get("/foo").mock(return_value=httpx.Response(200, json={"foo": "bar"}))
+ respx_mock.get("/foo").mock(
+ return_value=httpx.Response(200, json={"foo": "bar"})
+ )
- response = self.client.get("/foo", cast_to=cast(Any, Union[Model1, Model2]))
+ response = client.get("/foo", cast_to=cast(Any, Union[Model1, Model2]))
assert isinstance(response, Model2)
assert response.foo == "bar"
@pytest.mark.respx(base_url=base_url)
- def test_union_response_different_types(self, respx_mock: MockRouter) -> None:
+ def test_union_response_different_types(
+ self, respx_mock: MockRouter, client: Gradient
+ ) -> None:
"""Union of objects with the same field name using a different type"""
class Model1(BaseModel):
@@ -609,20 +663,24 @@ class Model1(BaseModel):
class Model2(BaseModel):
foo: str
- respx_mock.get("/foo").mock(return_value=httpx.Response(200, json={"foo": "bar"}))
+ respx_mock.get("/foo").mock(
+ return_value=httpx.Response(200, json={"foo": "bar"})
+ )
- response = self.client.get("/foo", cast_to=cast(Any, Union[Model1, Model2]))
+ response = client.get("/foo", cast_to=cast(Any, Union[Model1, Model2]))
assert isinstance(response, Model2)
assert response.foo == "bar"
respx_mock.get("/foo").mock(return_value=httpx.Response(200, json={"foo": 1}))
- response = self.client.get("/foo", cast_to=cast(Any, Union[Model1, Model2]))
+ response = client.get("/foo", cast_to=cast(Any, Union[Model1, Model2]))
assert isinstance(response, Model1)
assert response.foo == 1
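Both union tests depend on discrimination by validation: each member of the `Union` is tried and the first whose fields fit wins, so `{"foo": "bar"}` resolves to `Model2` and `{"foo": 1}` to `Model1`. Under Pydantic v2 a plain `TypeAdapter` shows the same behavior (a sketch; the SDK's `construct_type` is presumably similar in effect):

```python
# Pydantic v2 sketch of discrimination by validation over an untagged union.
from typing import Union
from pydantic import BaseModel, TypeAdapter

class Model1(BaseModel):
    foo: int

class Model2(BaseModel):
    foo: str

adapter = TypeAdapter(Union[Model1, Model2])
assert isinstance(adapter.validate_python({"foo": "bar"}), Model2)
assert isinstance(adapter.validate_python({"foo": 1}), Model1)
```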
@pytest.mark.respx(base_url=base_url)
- def test_non_application_json_content_type_for_json_data(self, respx_mock: MockRouter) -> None:
+ def test_non_application_json_content_type_for_json_data(
+ self, respx_mock: MockRouter, client: Gradient
+ ) -> None:
"""
Response that sets Content-Type to something other than application/json but returns json data
"""
@@ -638,7 +696,7 @@ class Model(BaseModel):
)
)
- response = self.client.get("/foo", cast_to=Model)
+ response = client.get("/foo", cast_to=Model)
assert isinstance(response, Model)
assert response.foo == 2
@@ -656,6 +714,8 @@ def test_base_url_setter(self) -> None:
assert client.base_url == "https://example.com/from_setter/"
+ client.close()
+
def test_base_url_env(self) -> None:
with update_env(GRADIENT_BASE_URL="http://localhost:5000/from/env"):
client = Gradient(
@@ -696,6 +756,7 @@ def test_base_url_trailing_slash(self, client: Gradient) -> None:
),
)
assert request.url == "http://localhost:5000/custom/path/foo"
+ client.close()
@pytest.mark.parametrize(
"client",
@@ -727,6 +788,7 @@ def test_base_url_no_trailing_slash(self, client: Gradient) -> None:
),
)
assert request.url == "http://localhost:5000/custom/path/foo"
+ client.close()
@pytest.mark.parametrize(
"client",
@@ -758,47 +820,48 @@ def test_absolute_request_url(self, client: Gradient) -> None:
),
)
assert request.url == "https://myapi.com/foo"
+ client.close()
def test_copied_client_does_not_close_http(self) -> None:
- client = Gradient(
+ test_client = Gradient(
base_url=base_url,
access_token=access_token,
model_access_key=model_access_key,
agent_access_key=agent_access_key,
_strict_response_validation=True,
)
- assert not client.is_closed()
+ assert not test_client.is_closed()
- copied = client.copy()
- assert copied is not client
+ copied = test_client.copy()
+ assert copied is not test_client
del copied
- assert not client.is_closed()
+ assert not test_client.is_closed()
def test_client_context_manager(self) -> None:
- client = Gradient(
+ test_client = Gradient(
base_url=base_url,
access_token=access_token,
model_access_key=model_access_key,
agent_access_key=agent_access_key,
_strict_response_validation=True,
)
- with client as c2:
- assert c2 is client
+ with test_client as c2:
+ assert c2 is test_client
assert not c2.is_closed()
- assert not client.is_closed()
- assert client.is_closed()
+ assert not test_client.is_closed()
+ assert test_client.is_closed()
@pytest.mark.respx(base_url=base_url)
- def test_client_response_validation_error(self, respx_mock: MockRouter) -> None:
+ def test_client_response_validation_error(
+ self, respx_mock: MockRouter, client: Gradient
+ ) -> None:
class Model(BaseModel):
foo: str
- respx_mock.get("/foo").mock(return_value=httpx.Response(200, json={"foo": {"invalid": True}}))
+ respx_mock.get("/foo").mock(
+ return_value=httpx.Response(200, json={"foo": {"invalid": True}})
+ )
with pytest.raises(APIResponseValidationError) as exc:
- self.client.get("/foo", cast_to=Model)
+ client.get("/foo", cast_to=Model)
assert isinstance(exc.value.__cause__, ValidationError)
@@ -814,13 +877,17 @@ def test_client_max_retries_validation(self) -> None:
)
@pytest.mark.respx(base_url=base_url)
- def test_default_stream_cls(self, respx_mock: MockRouter) -> None:
+ def test_default_stream_cls(self, respx_mock: MockRouter, client: Gradient) -> None:
class Model(BaseModel):
name: str
- respx_mock.post("/foo").mock(return_value=httpx.Response(200, json={"foo": "bar"}))
+ respx_mock.post("/foo").mock(
+ return_value=httpx.Response(200, json={"foo": "bar"})
+ )
- stream = self.client.post("/foo", cast_to=Model, stream=True, stream_cls=Stream[Model])
+ stream = client.post(
+ "/foo", cast_to=Model, stream=True, stream_cls=Stream[Model]
+ )
assert isinstance(stream, Stream)
stream.response.close()
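Closing `stream.response` here releases the connection without consuming events. At the user-facing level, the equivalent is requesting a streamed chat completion; a hedged usage sketch, assuming the OpenAI-compatible `stream=True` surface and credentials in the environment (model name is a placeholder):

```python
# Hedged usage sketch of the streaming surface exercised elsewhere in this file.
from gradient import Gradient

with Gradient() as client:
    stream = client.chat.completions.create(
        model="llama3-8b-instruct",
        messages=[{"role": "user", "content": "ping"}],
        stream=True,
    )
    for chunk in stream:  # each chunk is one parsed server-sent event
        pass
```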
@@ -829,7 +896,9 @@ def test_received_text_for_expected_json(self, respx_mock: MockRouter) -> None:
class Model(BaseModel):
name: str
- respx_mock.get("/foo").mock(return_value=httpx.Response(200, text="my-custom-format"))
+ respx_mock.get("/foo").mock(
+ return_value=httpx.Response(200, text="my-custom-format")
+ )
strict_client = Gradient(
base_url=base_url,
@@ -842,17 +911,18 @@ class Model(BaseModel):
with pytest.raises(APIResponseValidationError):
strict_client.get("/foo", cast_to=Model)
- client = Gradient(
+ non_strict_client = Gradient(
base_url=base_url,
access_token=access_token,
model_access_key=model_access_key,
agent_access_key=agent_access_key,
_strict_response_validation=False,
)
- response = client.get("/foo", cast_to=Model)
+ response = non_strict_client.get("/foo", cast_to=Model)
assert isinstance(response, str) # type: ignore[unreachable]
+ strict_client.close()
+ non_strict_client.close()
+
@pytest.mark.parametrize(
"remaining_retries,retry_after,timeout",
[
@@ -875,7 +945,9 @@ class Model(BaseModel):
],
)
@mock.patch("time.time", mock.MagicMock(return_value=1696004797))
- def test_parse_retry_after_header(self, remaining_retries: int, retry_after: str, timeout: float) -> None:
+ def test_parse_retry_after_header(
+ self, remaining_retries: int, retry_after: str, timeout: float
+ ) -> None:
client = Gradient(
base_url=base_url,
access_token=access_token,
@@ -886,13 +958,21 @@ def test_parse_retry_after_header(self, remaining_retries: int, retry_after: str
headers = httpx.Headers({"retry-after": retry_after})
options = FinalRequestOptions(method="get", url="/foo", max_retries=3)
- calculated = client._calculate_retry_timeout(remaining_retries, options, headers)
- assert calculated == pytest.approx(timeout, 0.5 * 0.875) # pyright: ignore[reportUnknownMemberType]
+ calculated = client._calculate_retry_timeout(
+ remaining_retries, options, headers
+ )
+ assert calculated == pytest.approx(timeout, rel=0.5 * 0.875) # type: ignore[misc]
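The wide `rel=0.5 * 0.875` tolerance exists because the computed sleep is jittered; only a sane `Retry-After` header is honored verbatim. A generic sketch of that decision, not gradient's exact formula:

```python
# Generic retry-delay sketch (illustrative, not the SDK's exact formula).
import random
from typing import Optional

def retry_delay(
    retry_after: Optional[float],
    remaining_retries: int,
    max_retries: int = 3,
    initial: float = 0.5,
    cap: float = 8.0,
) -> float:
    if retry_after is not None and 0 < retry_after <= 60:
        return retry_after  # the server said exactly how long to wait
    attempt = max_retries - remaining_retries
    sleep = min(initial * 2**attempt, cap)
    return sleep * (1 - 0.25 * random.random())  # jitter into [0.75x, 1.0x]
```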
- @mock.patch("gradient._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout)
+ @mock.patch(
+ "gradient._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout
+ )
@pytest.mark.respx(base_url=base_url)
- def test_retrying_timeout_errors_doesnt_leak(self, respx_mock: MockRouter, client: Gradient) -> None:
- respx_mock.post("/chat/completions").mock(side_effect=httpx.TimeoutException("Test timeout error"))
+ def test_retrying_timeout_errors_doesnt_leak(
+ self, respx_mock: MockRouter, client: Gradient
+ ) -> None:
+ respx_mock.post("/chat/completions").mock(
+ side_effect=httpx.TimeoutException("Test timeout error")
+ )
with pytest.raises(APITimeoutError):
client.chat.completions.with_streaming_response.create(
@@ -905,11 +985,15 @@ def test_retrying_timeout_errors_doesnt_leak(self, respx_mock: MockRouter, clien
model="llama3-8b-instruct",
).__enter__()
- assert _get_open_connections(self.client) == 0
+ assert _get_open_connections(client) == 0
- @mock.patch("gradient._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout)
+ @mock.patch(
+ "gradient._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout
+ )
@pytest.mark.respx(base_url=base_url)
- def test_retrying_status_errors_doesnt_leak(self, respx_mock: MockRouter, client: Gradient) -> None:
+ def test_retrying_status_errors_doesnt_leak(
+ self, respx_mock: MockRouter, client: Gradient
+ ) -> None:
respx_mock.post("/chat/completions").mock(return_value=httpx.Response(500))
with pytest.raises(APIStatusError):
@@ -922,10 +1006,12 @@ def test_retrying_status_errors_doesnt_leak(self, respx_mock: MockRouter, client
],
model="llama3-8b-instruct",
).__enter__()
- assert _get_open_connections(self.client) == 0
+ assert _get_open_connections(client) == 0
@pytest.mark.parametrize("failures_before_success", [0, 2, 4])
- @mock.patch("gradient._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout)
+ @mock.patch(
+ "gradient._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout
+ )
@pytest.mark.respx(base_url=base_url)
@pytest.mark.parametrize("failure_mode", ["status", "exception"])
def test_retries_taken(
@@ -961,10 +1047,15 @@ def retry_handler(_request: httpx.Request) -> httpx.Response:
)
assert response.retries_taken == failures_before_success
- assert int(response.http_request.headers.get("x-stainless-retry-count")) == failures_before_success
+ assert (
+ int(response.http_request.headers.get("x-stainless-retry-count"))
+ == failures_before_success
+ )
@pytest.mark.parametrize("failures_before_success", [0, 2, 4])
- @mock.patch("gradient._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout)
+ @mock.patch(
+ "gradient._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout
+ )
@pytest.mark.respx(base_url=base_url)
def test_omit_retry_count_header(
self, client: Gradient, failures_before_success: int, respx_mock: MockRouter
@@ -993,10 +1084,14 @@ def retry_handler(_request: httpx.Request) -> httpx.Response:
extra_headers={"x-stainless-retry-count": Omit()},
)
- assert len(response.http_request.headers.get_list("x-stainless-retry-count")) == 0
+ assert (
+ len(response.http_request.headers.get_list("x-stainless-retry-count")) == 0
+ )
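The `retry_handler` closures these tests pass as respx `side_effect`s follow one pattern: count attempts, fail the first N calls, then succeed. A self-contained sketch of that fixture shape (URL and payload are placeholders):

```python
# Sketch of the retry_handler fixture shape used by the retry tests.
import httpx
import respx

def make_retry_handler(failures_before_success: int):
    nb_retries = 0

    def retry_handler(_request: httpx.Request) -> httpx.Response:
        nonlocal nb_retries
        if nb_retries < failures_before_success:
            nb_retries += 1
            return httpx.Response(500)
        return httpx.Response(200, json={"ok": True})

    return retry_handler

with respx.mock(base_url="http://testserver") as router:
    router.post("/chat/completions").mock(side_effect=make_retry_handler(2))
    with httpx.Client(base_url="http://testserver") as http:
        assert http.post("/chat/completions").status_code == 500
        assert http.post("/chat/completions").status_code == 500
        assert http.post("/chat/completions").status_code == 200
```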
@pytest.mark.parametrize("failures_before_success", [0, 2, 4])
- @mock.patch("gradient._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout)
+ @mock.patch(
+ "gradient._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout
+ )
@pytest.mark.respx(base_url=base_url)
def test_overwrite_retry_count_header(
self, client: Gradient, failures_before_success: int, respx_mock: MockRouter
@@ -1050,26 +1145,36 @@ def test_default_client_creation(self) -> None:
)
@pytest.mark.respx(base_url=base_url)
- def test_follow_redirects(self, respx_mock: MockRouter) -> None:
+ def test_follow_redirects(self, respx_mock: MockRouter, client: Gradient) -> None:
# Test that the default follow_redirects=True allows following redirects
respx_mock.post("/redirect").mock(
- return_value=httpx.Response(302, headers={"Location": f"{base_url}/redirected"})
+ return_value=httpx.Response(
+ 302, headers={"Location": f"{base_url}/redirected"}
+ )
+ )
+ respx_mock.get("/redirected").mock(
+ return_value=httpx.Response(200, json={"status": "ok"})
)
- respx_mock.get("/redirected").mock(return_value=httpx.Response(200, json={"status": "ok"}))
- response = self.client.post("/redirect", body={"key": "value"}, cast_to=httpx.Response)
+ response = client.post(
+ "/redirect", body={"key": "value"}, cast_to=httpx.Response
+ )
assert response.status_code == 200
assert response.json() == {"status": "ok"}
@pytest.mark.respx(base_url=base_url)
- def test_follow_redirects_disabled(self, respx_mock: MockRouter) -> None:
+ def test_follow_redirects_disabled(
+ self, respx_mock: MockRouter, client: Gradient
+ ) -> None:
# Test that follow_redirects=False prevents following redirects
respx_mock.post("/redirect").mock(
- return_value=httpx.Response(302, headers={"Location": f"{base_url}/redirected"})
+ return_value=httpx.Response(
+ 302, headers={"Location": f"{base_url}/redirected"}
+ )
)
with pytest.raises(APIStatusError) as exc_info:
- self.client.post(
+ client.post(
"/redirect",
body={"key": "value"},
options={"follow_redirects": False},
@@ -1081,27 +1186,23 @@ def test_follow_redirects_disabled(self, respx_mock: MockRouter) -> None:
class TestAsyncGradient:
- client = AsyncGradient(
- base_url=base_url,
- access_token=access_token,
- model_access_key=model_access_key,
- agent_access_key=agent_access_key,
- _strict_response_validation=True,
- )
-
@pytest.mark.respx(base_url=base_url)
- @pytest.mark.asyncio
- async def test_raw_response(self, respx_mock: MockRouter) -> None:
- respx_mock.post("/foo").mock(return_value=httpx.Response(200, json={"foo": "bar"}))
+ async def test_raw_response(
+ self, respx_mock: MockRouter, async_client: AsyncGradient
+ ) -> None:
+ respx_mock.post("/foo").mock(
+ return_value=httpx.Response(200, json={"foo": "bar"})
+ )
- response = await self.client.post("/foo", cast_to=httpx.Response)
+ response = await async_client.post("/foo", cast_to=httpx.Response)
assert response.status_code == 200
assert isinstance(response, httpx.Response)
assert response.json() == {"foo": "bar"}
@pytest.mark.respx(base_url=base_url)
- @pytest.mark.asyncio
- async def test_raw_response_for_binary(self, respx_mock: MockRouter) -> None:
+ async def test_raw_response_for_binary(
+ self, respx_mock: MockRouter, async_client: AsyncGradient
+ ) -> None:
respx_mock.post("/foo").mock(
return_value=httpx.Response(
200,
@@ -1110,44 +1211,50 @@ async def test_raw_response_for_binary(self, respx_mock: MockRouter) -> None:
)
)
- response = await self.client.post("/foo", cast_to=httpx.Response)
+ response = await async_client.post("/foo", cast_to=httpx.Response)
assert response.status_code == 200
assert isinstance(response, httpx.Response)
assert response.json() == {"foo": "bar"}
- def test_copy(self) -> None:
- copied = self.client.copy()
- assert id(copied) != id(self.client)
+ def test_copy(self, async_client: AsyncGradient) -> None:
+ copied = async_client.copy()
+ assert id(copied) != id(async_client)
- copied = self.client.copy(access_token="another My Access Token")
+ copied = async_client.copy(access_token="another My Access Token")
assert copied.access_token == "another My Access Token"
- assert self.client.access_token == "My Access Token"
+ assert async_client.access_token == "My Access Token"
- copied = self.client.copy(model_access_key="another My Model Access Key")
+ copied = async_client.copy(model_access_key="another My Model Access Key")
assert copied.model_access_key == "another My Model Access Key"
- assert self.client.model_access_key == "My Model Access Key"
+ assert async_client.model_access_key == "My Model Access Key"
- copied = self.client.copy(agent_access_key="another My Agent Access Key")
+ copied = async_client.copy(agent_access_key="another My Agent Access Key")
assert copied.agent_access_key == "another My Agent Access Key"
- assert self.client.agent_access_key == "My Agent Access Key"
+ assert async_client.agent_access_key == "My Agent Access Key"
def test_copy_default_options(self) -> None:
+ async_client = AsyncGradient(
+ base_url=base_url,
+ access_token=access_token,
+ model_access_key=model_access_key,
+ agent_access_key=agent_access_key,
+ )
# options that have a default are overridden correctly
- copied = self.client.copy(max_retries=7)
+ copied = async_client.copy(max_retries=7)
assert copied.max_retries == 7
- assert self.client.max_retries == 2
+ assert async_client.max_retries == 2
copied2 = copied.copy(max_retries=6)
assert copied2.max_retries == 6
assert copied.max_retries == 7
# timeout
- assert isinstance(self.client.timeout, httpx.Timeout)
- copied = self.client.copy(timeout=None)
+ assert isinstance(async_client.timeout, httpx.Timeout)
+ copied = async_client.copy(timeout=None)
assert copied.timeout is None
- assert isinstance(self.client.timeout, httpx.Timeout)
+ assert isinstance(async_client.timeout, httpx.Timeout)
- def test_copy_default_headers(self) -> None:
+ async def test_copy_default_headers(self) -> None:
client = AsyncGradient(
base_url=base_url,
access_token=access_token,
@@ -1185,8 +1292,9 @@ def test_copy_default_headers(self) -> None:
match="`default_headers` and `set_default_headers` arguments are mutually exclusive",
):
client.copy(set_default_headers={}, default_headers={"X-Foo": "Bar"})
+ await client.close()
- def test_copy_default_query(self) -> None:
+ async def test_copy_default_query(self) -> None:
client = AsyncGradient(
base_url=base_url,
access_token=access_token,
@@ -1227,13 +1335,15 @@ def test_copy_default_query(self) -> None:
):
client.copy(set_default_query={}, default_query={"foo": "Bar"})
- def test_copy_signature(self) -> None:
+ await client.close()
+
+ def test_copy_signature(self, async_client: AsyncGradient) -> None:
# ensure the same parameters that can be passed to the client are defined in the `.copy()` method
init_signature = inspect.signature(
# mypy doesn't like that we access the `__init__` property.
- self.client.__init__, # type: ignore[misc]
+ async_client.__init__, # type: ignore[misc]
)
- copy_signature = inspect.signature(self.client.copy)
+ copy_signature = inspect.signature(async_client.copy)
exclude_params = {"transport", "proxies", "_strict_response_validation"}
for name in init_signature.parameters.keys():
@@ -1241,18 +1351,26 @@ def test_copy_signature(self) -> None:
continue
copy_param = copy_signature.parameters.get(name)
- assert copy_param is not None, f"copy() signature is missing the {name} param"
+ assert (
+ copy_param is not None
+ ), f"copy() signature is missing the {name} param"
@pytest.mark.skipif(
sys.version_info >= (3, 10),
reason="fails because of a memory leak that started from 3.12",
)
def test_copy_build_request(self) -> None:
+ async_client = AsyncGradient(
+ base_url=base_url,
+ access_token=access_token,
+ model_access_key=model_access_key,
+ agent_access_key=agent_access_key,
+ )
options = FinalRequestOptions(method="get", url="/foo")
def build_request(options: FinalRequestOptions) -> None:
- client = self.client.copy()
- client._build_request(options)
+ client_copy = async_client.copy()
+ client_copy._build_request(options)
# ensure that the machinery is warmed up before tracing starts.
build_request(options)
@@ -1271,7 +1389,9 @@ def build_request(options: FinalRequestOptions) -> None:
tracemalloc.stop()
- def add_leak(leaks: list[tracemalloc.StatisticDiff], diff: tracemalloc.StatisticDiff) -> None:
+ def add_leak(
+ leaks: list[tracemalloc.StatisticDiff], diff: tracemalloc.StatisticDiff
+ ) -> None:
if diff.count == 0:
# Avoid false positives by considering only leaks (i.e. allocations that persist).
return
@@ -1309,12 +1429,14 @@ def add_leak(leaks: list[tracemalloc.StatisticDiff], diff: tracemalloc.Statistic
print(frame) # noqa: T201
raise AssertionError()
- async def test_request_timeout(self) -> None:
- request = self.client._build_request(FinalRequestOptions(method="get", url="/foo"))
+ async def test_request_timeout(self, async_client: AsyncGradient) -> None:
+ request = async_client._build_request(
+ FinalRequestOptions(method="get", url="/foo")
+ )
timeout = httpx.Timeout(**request.extensions["timeout"]) # type: ignore
assert timeout == DEFAULT_TIMEOUT
- request = self.client._build_request(
+ request = async_client._build_request(
FinalRequestOptions(method="get", url="/foo", timeout=httpx.Timeout(100.0))
)
timeout = httpx.Timeout(**request.extensions["timeout"]) # type: ignore
@@ -1334,6 +1456,8 @@ async def test_client_timeout_option(self) -> None:
timeout = httpx.Timeout(**request.extensions["timeout"]) # type: ignore
assert timeout == httpx.Timeout(0)
+ await client.close()
+
async def test_http_client_timeout_option(self) -> None:
# custom timeout given to the httpx client should be used
async with httpx.AsyncClient(timeout=None) as http_client:
@@ -1346,10 +1470,14 @@ async def test_http_client_timeout_option(self) -> None:
http_client=http_client,
)
- request = client._build_request(FinalRequestOptions(method="get", url="/foo"))
+ request = client._build_request(
+ FinalRequestOptions(method="get", url="/foo")
+ )
timeout = httpx.Timeout(**request.extensions["timeout"]) # type: ignore
assert timeout == httpx.Timeout(None)
+ await client.close()
+
# no timeout given to the httpx client should not use the httpx default
async with httpx.AsyncClient() as http_client:
client = AsyncGradient(
@@ -1361,10 +1489,14 @@ async def test_http_client_timeout_option(self) -> None:
http_client=http_client,
)
- request = client._build_request(FinalRequestOptions(method="get", url="/foo"))
+ request = client._build_request(
+ FinalRequestOptions(method="get", url="/foo")
+ )
timeout = httpx.Timeout(**request.extensions["timeout"]) # type: ignore
assert timeout == DEFAULT_TIMEOUT
+ await client.close()
+
# explicitly passing the default timeout currently results in it being ignored
async with httpx.AsyncClient(timeout=HTTPX_DEFAULT_TIMEOUT) as http_client:
client = AsyncGradient(
@@ -1376,10 +1508,14 @@ async def test_http_client_timeout_option(self) -> None:
http_client=http_client,
)
- request = client._build_request(FinalRequestOptions(method="get", url="/foo"))
+ request = client._build_request(
+ FinalRequestOptions(method="get", url="/foo")
+ )
timeout = httpx.Timeout(**request.extensions["timeout"]) # type: ignore
assert timeout == DEFAULT_TIMEOUT # our default
+ await client.close()
+
def test_invalid_http_client(self) -> None:
with pytest.raises(TypeError, match="Invalid `http_client` arg"):
with httpx.Client() as http_client:
@@ -1392,8 +1528,8 @@ def test_invalid_http_client(self) -> None:
http_client=cast(Any, http_client),
)
- def test_default_headers_option(self) -> None:
- client = AsyncGradient(
+ async def test_default_headers_option(self) -> None:
+ test_client = AsyncGradient(
base_url=base_url,
access_token=access_token,
model_access_key=model_access_key,
@@ -1401,11 +1537,13 @@ def test_default_headers_option(self) -> None:
_strict_response_validation=True,
default_headers={"X-Foo": "bar"},
)
- request = client._build_request(FinalRequestOptions(method="get", url="/foo"))
+ request = test_client._build_request(
+ FinalRequestOptions(method="get", url="/foo")
+ )
assert request.headers.get("x-foo") == "bar"
assert request.headers.get("x-stainless-lang") == "python"
- client2 = AsyncGradient(
+ test_client2 = AsyncGradient(
base_url=base_url,
access_token=access_token,
model_access_key=model_access_key,
@@ -1416,10 +1554,15 @@ def test_default_headers_option(self) -> None:
"X-Stainless-Lang": "my-overriding-header",
},
)
- request = client2._build_request(FinalRequestOptions(method="get", url="/foo"))
+ request = test_client2._build_request(
+ FinalRequestOptions(method="get", url="/foo")
+ )
assert request.headers.get("x-foo") == "stainless"
assert request.headers.get("x-stainless-lang") == "my-overriding-header"
+ await test_client.close()
+ await test_client2.close()
+
def test_validate_headers(self) -> None:
client = AsyncGradient(
base_url=base_url,
@@ -1432,7 +1575,11 @@ def test_validate_headers(self) -> None:
assert request.headers.get("Authorization") == f"Bearer {access_token}"
with update_env(
- **{"DIGITALOCEAN_ACCESS_TOKEN": Omit(), "MODEL_ACCESS_KEY": Omit(), "AGENT_ACCESS_KEY": Omit()}
+ **{
+ "DIGITALOCEAN_ACCESS_TOKEN": Omit(),
+ "MODEL_ACCESS_KEY": Omit(),
+ "AGENT_ACCESS_KEY": Omit(),
+ }
):
client2 = AsyncGradient(
base_url=base_url,
@@ -1449,11 +1596,13 @@ def test_validate_headers(self) -> None:
client2._build_request(FinalRequestOptions(method="get", url="/foo"))
request2 = client2._build_request(
- FinalRequestOptions(method="get", url="/foo", headers={"Authorization": Omit()})
+ FinalRequestOptions(
+ method="get", url="/foo", headers={"Authorization": Omit()}
+ )
)
assert request2.headers.get("Authorization") is None
- def test_default_query_option(self) -> None:
+ async def test_default_query_option(self) -> None:
client = AsyncGradient(
base_url=base_url,
access_token=access_token,
@@ -1476,8 +1625,10 @@ def test_default_query_option(self) -> None:
url = httpx.URL(request.url)
assert dict(url.params) == {"foo": "baz", "query_param": "overridden"}
- def test_request_extra_json(self) -> None:
- request = self.client._build_request(
+ await client.close()
+
+ def test_request_extra_json(self, async_client: AsyncGradient) -> None:
+ request = async_client._build_request(
FinalRequestOptions(
method="post",
url="/foo",
@@ -1488,7 +1639,7 @@ def test_request_extra_json(self) -> None:
data = json.loads(request.content.decode("utf-8"))
assert data == {"foo": "bar", "baz": False}
- request = self.client._build_request(
+ request = async_client._build_request(
FinalRequestOptions(
method="post",
url="/foo",
@@ -1499,7 +1650,7 @@ def test_request_extra_json(self) -> None:
assert data == {"baz": False}
# `extra_json` takes priority over `json_data` when keys clash
- request = self.client._build_request(
+ request = async_client._build_request(
FinalRequestOptions(
method="post",
url="/foo",
@@ -1510,8 +1661,8 @@ def test_request_extra_json(self) -> None:
data = json.loads(request.content.decode("utf-8"))
assert data == {"foo": "bar", "baz": None}
- def test_request_extra_headers(self) -> None:
- request = self.client._build_request(
+ def test_request_extra_headers(self, async_client: AsyncGradient) -> None:
+ request = async_client._build_request(
FinalRequestOptions(
method="post",
url="/foo",
@@ -1521,7 +1672,7 @@ def test_request_extra_headers(self) -> None:
assert request.headers.get("X-Foo") == "Foo"
# `extra_headers` takes priority over `default_headers` when keys clash
- request = self.client.with_options(default_headers={"X-Bar": "true"})._build_request(
+ request = async_client.with_options(default_headers={"X-Bar": "true"})._build_request(
FinalRequestOptions(
method="post",
url="/foo",
@@ -1532,8 +1683,8 @@ def test_request_extra_headers(self) -> None:
)
assert request.headers.get("X-Bar") == "false"
- def test_request_extra_query(self) -> None:
- request = self.client._build_request(
+ def test_request_extra_query(self, async_client: AsyncGradient) -> None:
+ request = async_client._build_request(
FinalRequestOptions(
method="post",
url="/foo",
@@ -1546,7 +1697,7 @@ def test_request_extra_query(self) -> None:
assert params == {"my_query_param": "Foo"}
# if both `query` and `extra_query` are given, they are merged
- request = self.client._build_request(
+ request = async_client._build_request(
FinalRequestOptions(
method="post",
url="/foo",
@@ -1560,7 +1711,7 @@ def test_request_extra_query(self) -> None:
assert params == {"bar": "1", "foo": "2"}
# `extra_query` takes priority over `query` when keys clash
- request = self.client._build_request(
+ request = async_client._build_request(
FinalRequestOptions(
method="post",
url="/foo",
@@ -1578,7 +1729,9 @@ def test_multipart_repeating_array(self, async_client: AsyncGradient) -> None:
FinalRequestOptions.construct(
method="post",
url="/foo",
- headers={"Content-Type": "multipart/form-data; boundary=6b7ba517decee4a450543ea6ae821c82"},
+ headers={
+ "Content-Type": "multipart/form-data; boundary=6b7ba517decee4a450543ea6ae821c82"
+ },
json_data={"array": ["foo", "bar"]},
files=[("foo.txt", b"hello world")],
)
@@ -1603,21 +1756,29 @@ def test_multipart_repeating_array(self, async_client: AsyncGradient) -> None:
]
@pytest.mark.respx(base_url=base_url)
- async def test_basic_union_response(self, respx_mock: MockRouter) -> None:
+ async def test_basic_union_response(
+ self, respx_mock: MockRouter, async_client: AsyncGradient
+ ) -> None:
class Model1(BaseModel):
name: str
class Model2(BaseModel):
foo: str
- respx_mock.get("/foo").mock(return_value=httpx.Response(200, json={"foo": "bar"}))
+ respx_mock.get("/foo").mock(
+ return_value=httpx.Response(200, json={"foo": "bar"})
+ )
- response = await self.client.get("/foo", cast_to=cast(Any, Union[Model1, Model2]))
+ response = await async_client.get(
+ "/foo", cast_to=cast(Any, Union[Model1, Model2])
+ )
assert isinstance(response, Model2)
assert response.foo == "bar"
@pytest.mark.respx(base_url=base_url)
- async def test_union_response_different_types(self, respx_mock: MockRouter) -> None:
+ async def test_union_response_different_types(
+ self, respx_mock: MockRouter, async_client: AsyncGradient
+ ) -> None:
"""Union of objects with the same field name using a different type"""
class Model1(BaseModel):
@@ -1626,20 +1787,28 @@ class Model1(BaseModel):
class Model2(BaseModel):
foo: str
- respx_mock.get("/foo").mock(return_value=httpx.Response(200, json={"foo": "bar"}))
+ respx_mock.get("/foo").mock(
+ return_value=httpx.Response(200, json={"foo": "bar"})
+ )
- response = await self.client.get("/foo", cast_to=cast(Any, Union[Model1, Model2]))
+ response = await async_client.get(
+ "/foo", cast_to=cast(Any, Union[Model1, Model2])
+ )
assert isinstance(response, Model2)
assert response.foo == "bar"
respx_mock.get("/foo").mock(return_value=httpx.Response(200, json={"foo": 1}))
- response = await self.client.get("/foo", cast_to=cast(Any, Union[Model1, Model2]))
+ response = await async_client.get(
+ "/foo", cast_to=cast(Any, Union[Model1, Model2])
+ )
assert isinstance(response, Model1)
assert response.foo == 1
@pytest.mark.respx(base_url=base_url)
- async def test_non_application_json_content_type_for_json_data(self, respx_mock: MockRouter) -> None:
+ async def test_non_application_json_content_type_for_json_data(
+ self, respx_mock: MockRouter, async_client: AsyncGradient
+ ) -> None:
"""
Response that sets Content-Type to something other than application/json but returns json data
"""
@@ -1655,11 +1824,11 @@ class Model(BaseModel):
)
)
- response = await self.client.get("/foo", cast_to=Model)
+ response = await async_client.get("/foo", cast_to=Model)
assert isinstance(response, Model)
assert response.foo == 2
- def test_base_url_setter(self) -> None:
+ async def test_base_url_setter(self) -> None:
client = AsyncGradient(
base_url="https://example.com/from_init",
access_token=access_token,
@@ -1673,7 +1842,9 @@ def test_base_url_setter(self) -> None:
assert client.base_url == "https://example.com/from_setter/"
- def test_base_url_env(self) -> None:
+ await client.close()
+
+ async def test_base_url_env(self) -> None:
with update_env(GRADIENT_BASE_URL="http://localhost:5000/from/env"):
client = AsyncGradient(
access_token=access_token,
@@ -1704,7 +1875,7 @@ def test_base_url_env(self) -> None:
],
ids=["standard", "custom http client"],
)
- def test_base_url_trailing_slash(self, client: AsyncGradient) -> None:
+ async def test_base_url_trailing_slash(self, client: AsyncGradient) -> None:
request = client._build_request(
FinalRequestOptions(
method="post",
@@ -1713,6 +1884,7 @@ def test_base_url_trailing_slash(self, client: AsyncGradient) -> None:
),
)
assert request.url == "http://localhost:5000/custom/path/foo"
+ await client.close()
@pytest.mark.parametrize(
"client",
@@ -1735,7 +1907,7 @@ def test_base_url_trailing_slash(self, client: AsyncGradient) -> None:
],
ids=["standard", "custom http client"],
)
- def test_base_url_no_trailing_slash(self, client: AsyncGradient) -> None:
+ async def test_base_url_no_trailing_slash(self, client: AsyncGradient) -> None:
request = client._build_request(
FinalRequestOptions(
method="post",
@@ -1744,6 +1916,7 @@ def test_base_url_no_trailing_slash(self, client: AsyncGradient) -> None:
),
)
assert request.url == "http://localhost:5000/custom/path/foo"
+ await client.close()
@pytest.mark.parametrize(
"client",
@@ -1766,7 +1939,7 @@ def test_base_url_no_trailing_slash(self, client: AsyncGradient) -> None:
],
ids=["standard", "custom http client"],
)
- def test_absolute_request_url(self, client: AsyncGradient) -> None:
+ async def test_absolute_request_url(self, client: AsyncGradient) -> None:
request = client._build_request(
FinalRequestOptions(
method="post",
@@ -1775,49 +1948,53 @@ def test_absolute_request_url(self, client: AsyncGradient) -> None:
),
)
assert request.url == "https://myapi.com/foo"
+ await client.close()
async def test_copied_client_does_not_close_http(self) -> None:
- client = AsyncGradient(
+ test_client = AsyncGradient(
base_url=base_url,
access_token=access_token,
model_access_key=model_access_key,
agent_access_key=agent_access_key,
_strict_response_validation=True,
)
- assert not client.is_closed()
+ assert not test_client.is_closed()
- copied = client.copy()
- assert copied is not client
+ copied = test_client.copy()
+ assert copied is not test_client
del copied
await asyncio.sleep(0.2)
- assert not client.is_closed()
+ assert not test_client.is_closed()
async def test_client_context_manager(self) -> None:
- client = AsyncGradient(
+ test_client = AsyncGradient(
base_url=base_url,
access_token=access_token,
model_access_key=model_access_key,
agent_access_key=agent_access_key,
_strict_response_validation=True,
)
- async with client as c2:
- assert c2 is client
+ async with test_client as c2:
+ assert c2 is test_client
assert not c2.is_closed()
- assert not client.is_closed()
- assert client.is_closed()
+ assert not test_client.is_closed()
+ assert test_client.is_closed()
@pytest.mark.respx(base_url=base_url)
- @pytest.mark.asyncio
- async def test_client_response_validation_error(self, respx_mock: MockRouter) -> None:
+ async def test_client_response_validation_error(
+ self, respx_mock: MockRouter, async_client: AsyncGradient
+ ) -> None:
class Model(BaseModel):
foo: str
- respx_mock.get("/foo").mock(return_value=httpx.Response(200, json={"foo": {"invalid": True}}))
+ respx_mock.get("/foo").mock(
+ return_value=httpx.Response(200, json={"foo": {"invalid": True}})
+ )
with pytest.raises(APIResponseValidationError) as exc:
- await self.client.get("/foo", cast_to=Model)
+ await async_client.get("/foo", cast_to=Model)
assert isinstance(exc.value.__cause__, ValidationError)
@@ -1833,24 +2010,32 @@ async def test_client_max_retries_validation(self) -> None:
)
@pytest.mark.respx(base_url=base_url)
- @pytest.mark.asyncio
- async def test_default_stream_cls(self, respx_mock: MockRouter) -> None:
+ async def test_default_stream_cls(
+ self, respx_mock: MockRouter, async_client: AsyncGradient
+ ) -> None:
class Model(BaseModel):
name: str
- respx_mock.post("/foo").mock(return_value=httpx.Response(200, json={"foo": "bar"}))
+ respx_mock.post("/foo").mock(
+ return_value=httpx.Response(200, json={"foo": "bar"})
+ )
- stream = await self.client.post("/foo", cast_to=Model, stream=True, stream_cls=AsyncStream[Model])
+ stream = await async_client.post(
+ "/foo", cast_to=Model, stream=True, stream_cls=AsyncStream[Model]
+ )
assert isinstance(stream, AsyncStream)
await stream.response.aclose()
@pytest.mark.respx(base_url=base_url)
- @pytest.mark.asyncio
- async def test_received_text_for_expected_json(self, respx_mock: MockRouter) -> None:
+ async def test_received_text_for_expected_json(
+ self, respx_mock: MockRouter
+ ) -> None:
class Model(BaseModel):
name: str
- respx_mock.get("/foo").mock(return_value=httpx.Response(200, text="my-custom-format"))
+ respx_mock.get("/foo").mock(
+ return_value=httpx.Response(200, text="my-custom-format")
+ )
strict_client = AsyncGradient(
base_url=base_url,
@@ -1863,7 +2048,7 @@ class Model(BaseModel):
with pytest.raises(APIResponseValidationError):
await strict_client.get("/foo", cast_to=Model)
- client = AsyncGradient(
+ non_strict_client = AsyncGradient(
base_url=base_url,
access_token=access_token,
model_access_key=model_access_key,
@@ -1871,9 +2056,12 @@ class Model(BaseModel):
_strict_response_validation=False,
)
- response = await client.get("/foo", cast_to=Model)
+ response = await non_strict_client.get("/foo", cast_to=Model)
assert isinstance(response, str) # type: ignore[unreachable]
+ await strict_client.close()
+ await non_strict_client.close()
+
@pytest.mark.parametrize(
"remaining_retries,retry_after,timeout",
[
@@ -1897,8 +2085,10 @@ class Model(BaseModel):
)
@mock.patch("time.time", mock.MagicMock(return_value=1696004797))
@pytest.mark.asyncio
- async def test_parse_retry_after_header(self, remaining_retries: int, retry_after: str, timeout: float) -> None:
- client = AsyncGradient(
+ async def test_parse_retry_after_header(
+ self, remaining_retries: int, retry_after: str, timeout: float
+ ) -> None:
+ async_client = AsyncGradient(
base_url=base_url,
access_token=access_token,
model_access_key=model_access_key,
@@ -1908,15 +2098,21 @@ async def test_parse_retry_after_header(self, remaining_retries: int, retry_afte
headers = httpx.Headers({"retry-after": retry_after})
options = FinalRequestOptions(method="get", url="/foo", max_retries=3)
- calculated = client._calculate_retry_timeout(remaining_retries, options, headers)
- assert calculated == pytest.approx(timeout, 0.5 * 0.875) # pyright: ignore[reportUnknownMemberType]
+ calculated = async_client._calculate_retry_timeout(
+ remaining_retries, options, headers
+ )
+ assert calculated == pytest.approx(timeout, rel=0.5 * 0.875) # type: ignore[misc]
- @mock.patch("gradient._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout)
+ @mock.patch(
+ "gradient._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout
+ )
@pytest.mark.respx(base_url=base_url)
async def test_retrying_timeout_errors_doesnt_leak(
self, respx_mock: MockRouter, async_client: AsyncGradient
) -> None:
- respx_mock.post("/chat/completions").mock(side_effect=httpx.TimeoutException("Test timeout error"))
+ respx_mock.post("/chat/completions").mock(
+ side_effect=httpx.TimeoutException("Test timeout error")
+ )
with pytest.raises(APITimeoutError):
await async_client.chat.completions.with_streaming_response.create(
@@ -1929,9 +2125,11 @@ async def test_retrying_timeout_errors_doesnt_leak(
model="llama3-8b-instruct",
).__aenter__()
- assert _get_open_connections(self.client) == 0
+ assert _get_open_connections(async_client) == 0
- @mock.patch("gradient._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout)
+ @mock.patch(
+ "gradient._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout
+ )
@pytest.mark.respx(base_url=base_url)
async def test_retrying_status_errors_doesnt_leak(
self, respx_mock: MockRouter, async_client: AsyncGradient
@@ -1948,12 +2146,13 @@ async def test_retrying_status_errors_doesnt_leak(
],
model="llama3-8b-instruct",
).__aenter__()
- assert _get_open_connections(self.client) == 0
+ assert _get_open_connections(async_client) == 0
@pytest.mark.parametrize("failures_before_success", [0, 2, 4])
- @mock.patch("gradient._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout)
+ @mock.patch(
+ "gradient._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout
+ )
@pytest.mark.respx(base_url=base_url)
- @pytest.mark.asyncio
@pytest.mark.parametrize("failure_mode", ["status", "exception"])
async def test_retries_taken(
self,
@@ -1988,12 +2187,16 @@ def retry_handler(_request: httpx.Request) -> httpx.Response:
)
assert response.retries_taken == failures_before_success
- assert int(response.http_request.headers.get("x-stainless-retry-count")) == failures_before_success
+ assert (
+ int(response.http_request.headers.get("x-stainless-retry-count"))
+ == failures_before_success
+ )
@pytest.mark.parametrize("failures_before_success", [0, 2, 4])
- @mock.patch("gradient._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout)
+ @mock.patch(
+ "gradient._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout
+ )
@pytest.mark.respx(base_url=base_url)
- @pytest.mark.asyncio
async def test_omit_retry_count_header(
self,
async_client: AsyncGradient,
@@ -2024,12 +2227,15 @@ def retry_handler(_request: httpx.Request) -> httpx.Response:
extra_headers={"x-stainless-retry-count": Omit()},
)
- assert len(response.http_request.headers.get_list("x-stainless-retry-count")) == 0
+ assert (
+ len(response.http_request.headers.get_list("x-stainless-retry-count")) == 0
+ )
@pytest.mark.parametrize("failures_before_success", [0, 2, 4])
- @mock.patch("gradient._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout)
+ @mock.patch(
+ "gradient._base_client.BaseClient._calculate_retry_timeout", _low_retry_timeout
+ )
@pytest.mark.respx(base_url=base_url)
- @pytest.mark.asyncio
async def test_overwrite_retry_count_header(
self,
async_client: AsyncGradient,
@@ -2066,7 +2272,9 @@ async def test_get_platform(self) -> None:
platform = await asyncify(get_platform)()
assert isinstance(platform, (str, OtherPlatform))
- async def test_proxy_environment_variables(self, monkeypatch: pytest.MonkeyPatch) -> None:
+ async def test_proxy_environment_variables(
+ self, monkeypatch: pytest.MonkeyPatch
+ ) -> None:
# Test that the proxy environment variables are set correctly
monkeypatch.setenv("HTTPS_PROXY", "https://example.org")
@@ -2089,26 +2297,38 @@ async def test_default_client_creation(self) -> None:
)
@pytest.mark.respx(base_url=base_url)
- async def test_follow_redirects(self, respx_mock: MockRouter) -> None:
+ async def test_follow_redirects(
+ self, respx_mock: MockRouter, async_client: AsyncGradient
+ ) -> None:
# Test that the default follow_redirects=True allows following redirects
respx_mock.post("/redirect").mock(
- return_value=httpx.Response(302, headers={"Location": f"{base_url}/redirected"})
+ return_value=httpx.Response(
+ 302, headers={"Location": f"{base_url}/redirected"}
+ )
+ )
+ respx_mock.get("/redirected").mock(
+ return_value=httpx.Response(200, json={"status": "ok"})
)
- respx_mock.get("/redirected").mock(return_value=httpx.Response(200, json={"status": "ok"}))
- response = await self.client.post("/redirect", body={"key": "value"}, cast_to=httpx.Response)
+ response = await async_client.post(
+ "/redirect", body={"key": "value"}, cast_to=httpx.Response
+ )
assert response.status_code == 200
assert response.json() == {"status": "ok"}
@pytest.mark.respx(base_url=base_url)
- async def test_follow_redirects_disabled(self, respx_mock: MockRouter) -> None:
+ async def test_follow_redirects_disabled(
+ self, respx_mock: MockRouter, async_client: AsyncGradient
+ ) -> None:
# Test that follow_redirects=False prevents following redirects
respx_mock.post("/redirect").mock(
- return_value=httpx.Response(302, headers={"Location": f"{base_url}/redirected"})
+ return_value=httpx.Response(
+ 302, headers={"Location": f"{base_url}/redirected"}
+ )
)
with pytest.raises(APIStatusError) as exc_info:
- await self.client.post(
+ await async_client.post(
"/redirect",
body={"key": "value"},
options={"follow_redirects": False},
diff --git a/tests/test_models.py b/tests/test_models.py
index de5ef465..ba635571 100644
--- a/tests/test_models.py
+++ b/tests/test_models.py
@@ -9,7 +9,7 @@
from gradient._utils import PropertyInfo
from gradient._compat import PYDANTIC_V1, parse_obj, model_dump, model_json
-from gradient._models import BaseModel, construct_type
+from gradient._models import DISCRIMINATOR_CACHE, BaseModel, construct_type
class BasicModel(BaseModel):
@@ -809,7 +809,7 @@ class B(BaseModel):
UnionType = cast(Any, Union[A, B])
- assert not hasattr(UnionType, "__discriminator__")
+ assert not DISCRIMINATOR_CACHE.get(UnionType)
m = construct_type(
value={"type": "b", "data": "foo"}, type_=cast(Any, Annotated[UnionType, PropertyInfo(discriminator="type")])
@@ -818,7 +818,7 @@ class B(BaseModel):
assert m.type == "b"
assert m.data == "foo" # type: ignore[comparison-overlap]
- discriminator = UnionType.__discriminator__
+ discriminator = DISCRIMINATOR_CACHE.get(UnionType)
assert discriminator is not None
m = construct_type(
@@ -830,7 +830,7 @@ class B(BaseModel):
# if the discriminator details object stays the same between invocations then
# we hit the cache
- assert UnionType.__discriminator__ is discriminator
+ assert DISCRIMINATOR_CACHE.get(UnionType) is discriminator
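The assertions above track a real change of storage: discriminator details now live in a module-level `DISCRIMINATOR_CACHE` keyed by the union type rather than as a `__discriminator__` attribute on the union object, which typing unions don't reliably accept. A sketch of that cache shape, with illustrative names and a plain dict standing in for whatever mapping the SDK actually uses:

```python
# Illustrative cache sketch: typing unions are hashable, so they work as keys.
from typing import Any, Dict, Union, get_args

DISCRIMINATOR_CACHE: Dict[Any, Any] = {}

def discriminator_details(union_type: Any, field_name: str) -> Dict[str, Any]:
    cached = DISCRIMINATOR_CACHE.get(union_type)
    if cached is not None:
        return cached  # a second lookup hits the cache, as the test asserts
    details = {"field_name": field_name, "variants": get_args(union_type)}
    DISCRIMINATOR_CACHE[union_type] = details
    return details

UnionType = Union[int, str]
first = discriminator_details(UnionType, "type")
assert discriminator_details(UnionType, "type") is first
```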
@pytest.mark.skipif(PYDANTIC_V1, reason="TypeAliasType is not supported in Pydantic v1")
diff --git a/tests/test_smoke_sdk_async.py b/tests/test_smoke_sdk_async.py
index e732ec73..49e1ca78 100644
--- a/tests/test_smoke_sdk_async.py
+++ b/tests/test_smoke_sdk_async.py
@@ -25,61 +25,71 @@ async def test_async_smoke_environment_and_client_state() -> None:
missing = [k for k in REQUIRED_ENV_VARS if not os.getenv(k)]
if missing:
pytest.fail(
- "Missing required environment variables for async smoke tests: " + ", ".join(missing),
+ "Missing required environment variables for async smoke tests: "
+ + ", ".join(missing),
pytrace=False,
)
- client = AsyncGradient()
-
- # Property assertions (auto-loaded from environment)
- assert client.access_token == os.environ["DIGITALOCEAN_ACCESS_TOKEN"], "access_token not loaded from env"
- assert client.model_access_key == os.environ["GRADIENT_MODEL_ACCESS_KEY"], "model_access_key not loaded from env"
- assert client.agent_access_key == os.environ["GRADIENT_AGENT_ACCESS_KEY"], "agent_access_key not loaded from env"
- expected_endpoint = os.environ["GRADIENT_AGENT_ENDPOINT"]
- normalized_expected = (
- expected_endpoint if expected_endpoint.startswith("https://") else f"https://{expected_endpoint}"
- )
- assert client.agent_endpoint == normalized_expected, "agent_endpoint not derived correctly from env"
+ async with AsyncGradient() as client:
+ # Property assertions (auto-loaded from environment)
+ assert (
+ client.access_token == os.environ["DIGITALOCEAN_ACCESS_TOKEN"]
+ ), "access_token not loaded from env"
+ assert (
+ client.model_access_key == os.environ["GRADIENT_MODEL_ACCESS_KEY"]
+ ), "model_access_key not loaded from env"
+ assert (
+ client.agent_access_key == os.environ["GRADIENT_AGENT_ACCESS_KEY"]
+ ), "agent_access_key not loaded from env"
+ expected_endpoint = os.environ["GRADIENT_AGENT_ENDPOINT"]
+ normalized_expected = (
+ expected_endpoint
+ if expected_endpoint.startswith("https://")
+ else f"https://{expected_endpoint}"
+ )
+ assert (
+ client.agent_endpoint == normalized_expected
+ ), "agent_endpoint not derived correctly from env"
@pytest.mark.smoke
@pytest.mark.asyncio
async def test_async_smoke_agents_listing() -> None:
- client = AsyncGradient()
- agents_list = await client.agents.list()
- assert agents_list is not None
- assert hasattr(agents_list, "agents")
+ async with AsyncGradient() as client:
+ agents_list = await client.agents.list()
+ assert agents_list is not None
+ assert hasattr(agents_list, "agents")
@pytest.mark.smoke
@pytest.mark.asyncio
async def test_async_smoke_gpu_droplets_listing() -> None:
- client = AsyncGradient()
- droplets_list = await client.gpu_droplets.list(type="gpus")
- assert droplets_list is not None
- assert hasattr(droplets_list, "droplets")
+ async with AsyncGradient() as client:
+ droplets_list = await client.gpu_droplets.list(type="gpus")
+ assert droplets_list is not None
+ assert hasattr(droplets_list, "droplets")
@pytest.mark.smoke
@pytest.mark.asyncio
async def test_async_smoke_inference_completion() -> None:
- inference_client = AsyncGradient()
- completion = await inference_client.chat.completions.create(
- model="llama3-8b-instruct",
- messages=[{"role": "user", "content": "ping"}],
- )
- assert completion is not None
- assert completion.choices
- assert completion.choices[0].message.content is not None
+ async with AsyncGradient() as inference_client:
+ completion = await inference_client.chat.completions.create(
+ model="llama3-8b-instruct",
+ messages=[{"role": "user", "content": "ping"}],
+ )
+ assert completion is not None
+ assert completion.choices
+ assert completion.choices[0].message.content is not None
@pytest.mark.smoke
@pytest.mark.asyncio
async def test_async_smoke_agent_inference_chat() -> None:
- agent_client = AsyncGradient()
- completion = await agent_client.agents.chat.completions.create(
- model="",
- messages=[{"role": "user", "content": "ping"}],
- )
- assert completion is not None
- assert completion.choices
+ async with AsyncGradient() as agent_client:
+ completion = await agent_client.agents.chat.completions.create(
+ model="",
+ messages=[{"role": "user", "content": "ping"}],
+ )
+ assert completion is not None
+ assert completion.choices
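Each smoke test now scopes its client with `async with`, so the underlying HTTP resources are released even when an assertion fails mid-block. The form is shorthand for a try/finally, sketched here with the same calls the tests make (credentials are read from the environment):

```python
# The `async with` form above is shorthand for this try/finally.
import asyncio

from gradient import AsyncGradient

async def smoke() -> None:
    client = AsyncGradient()
    try:
        agents_list = await client.agents.list()
        assert agents_list is not None
        assert hasattr(agents_list, "agents")
    finally:
        await client.close()

asyncio.run(smoke())
```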