diff --git a/api/proto/v1/base_search.proto b/api/proto/v1/base_search.proto
index 55732017..28a0a992 100644
--- a/api/proto/v1/base_search.proto
+++ b/api/proto/v1/base_search.proto
@@ -50,7 +50,7 @@ message Hybrid {
repeated string properties = 2;
// protolint:disable:next REPEATED_FIELD_NAMES_PLURALIZED
repeated float vector = 3 [deprecated = true]; // will be removed in the future, use vectors
- float alpha = 4;
+ float alpha = 4 [deprecated = true]; // deprecated in 1.36.0 - use alpha_param
enum FusionType {
FUSION_TYPE_UNSPECIFIED = 0;
FUSION_TYPE_RANKED = 1;
@@ -63,6 +63,7 @@ message Hybrid {
NearVector near_vector = 9; // same as above. Use the target vector in the hybrid message
Targets targets = 10;
optional SearchOperatorOptions bm25_search_operator = 11;
+ optional float alpha_param = 12;
// only vector distance, but keep it extendable
oneof threshold {
diff --git a/api/rest/schema.check.json b/api/rest/schema.check.json
index 00cc7c1d..71f53a2e 100644
--- a/api/rest/schema.check.json
+++ b/api/rest/schema.check.json
@@ -618,6 +618,154 @@
},
"type": "object"
},
+ "ExportCreateRequest": {
+ "description": "Request to create a new export operation",
+ "type": "object",
+ "required": ["id"],
+ "properties": {
+ "id": {
+ "type": "string",
+ "description": "Unique identifier for this export. Must be URL-safe."
+ },
+ "include": {
+ "type": "array",
+ "items": {
+ "type": "string"
+ },
+ "description": "List of collection names to include in the export. Cannot be used with 'exclude'."
+ },
+ "exclude": {
+ "type": "array",
+ "items": {
+ "type": "string"
+ },
+ "description": "List of collection names to exclude from the export. Cannot be used with 'include'."
+ },
+ "config": {
+ "type": "object",
+ "description": "Backend-specific configuration",
+ "properties": {
+ "bucket": {
+ "type": "string",
+ "description": "Bucket, container, or volume name for cloud storage backends"
+ },
+ "path": {
+ "type": "string",
+ "description": "Path prefix within the bucket or filesystem"
+ }
+ }
+ }
+ }
+ },
+ "ExportCreateResponse": {
+ "description": "Response from creating an export operation",
+ "type": "object",
+ "properties": {
+ "id": {
+ "type": "string",
+ "description": "Unique identifier for this export"
+ },
+ "backend": {
+ "type": "string",
+ "description": "The backend storage system used"
+ },
+ "path": {
+ "type": "string",
+ "description": "Full path where the export is being written"
+ },
+ "status": {
+ "type": "string",
+ "description": "Current status of the export",
+ "enum": ["STARTED"]
+ },
+ "startedAt": {
+ "type": "string",
+ "format": "date-time",
+ "description": "When the export started"
+ },
+ "classes": {
+ "type": "array",
+ "items": {
+ "type": "string"
+ },
+ "description": "List of collections being exported"
+ }
+ }
+ },
+ "ExportStatusResponse": {
+ "description": "Current status of an export operation",
+ "type": "object",
+ "properties": {
+ "id": {
+ "type": "string",
+ "description": "Unique identifier for this export"
+ },
+ "backend": {
+ "type": "string",
+ "description": "The backend storage system used"
+ },
+ "path": {
+ "type": "string",
+ "description": "Full path where the export is stored"
+ },
+ "status": {
+ "type": "string",
+ "description": "Current status of the export",
+ "enum": ["STARTED", "TRANSFERRING", "SUCCESS", "FAILED"]
+ },
+ "startedAt": {
+ "type": "string",
+ "format": "date-time",
+ "description": "When the export started"
+ },
+ "tookInMs": {
+ "type": "integer",
+ "format": "int64",
+ "description": "Duration of the export in milliseconds"
+ },
+ "classes": {
+ "type": "array",
+ "items": {
+ "type": "string"
+ },
+ "description": "List of collections in this export"
+ },
+ "shardStatus": {
+ "type": "object",
+ "additionalProperties": {
+ "type": "object",
+ "additionalProperties": {
+ "$ref": "#/definitions/ShardProgress"
+ }
+ },
+ "description": "Per-shard progress: className -> shardName -> status"
+ },
+ "error": {
+ "type": "string",
+ "description": "Error message if export failed"
+ }
+ }
+ },
+ "ShardProgress": {
+ "description": "Progress information for exporting a single shard",
+ "type": "object",
+ "properties": {
+ "status": {
+ "type": "string",
+ "description": "Status of this shard's export",
+ "enum": ["STARTED", "TRANSFERRING", "SUCCESS", "FAILED"]
+ },
+ "objectsExported": {
+ "type": "integer",
+ "format": "int64",
+ "description": "Number of objects exported from this shard"
+ },
+ "error": {
+ "type": "string",
+ "description": "Error message if this shard's export failed"
+ }
+ }
+ },
"GraphQLError": {
"description": "An error response caused by a GraphQL query.",
"properties": {
@@ -1728,8 +1876,10 @@
"STARTED",
"TRANSFERRING",
"TRANSFERRED",
+ "FINALIZING",
"SUCCESS",
"FAILED",
+ "CANCELLING",
"CANCELED"
]
},
@@ -1777,8 +1927,10 @@
"STARTED",
"TRANSFERRING",
"TRANSFERRED",
+ "FINALIZING",
"SUCCESS",
"FAILED",
+ "CANCELLING",
"CANCELED"
]
}
@@ -1900,6 +2052,12 @@
"items": {
"type": "string"
}
+ },
+ "incremental_base_backup_id": {
+ "description": "The ID of an existing backup to use as the base for a file-based incremental backup. If set, only files that have changed since the base backup will be included in the new backup.",
+ "type": "string",
+ "x-nullable": true,
+ "default": null
}
}
},
@@ -1941,8 +2099,10 @@
"STARTED",
"TRANSFERRING",
"TRANSFERRED",
+ "FINALIZING",
"SUCCESS",
"FAILED",
+ "CANCELLING",
"CANCELED"
]
}
@@ -1972,8 +2132,10 @@
"STARTED",
"TRANSFERRING",
"TRANSFERRED",
+ "FINALIZING",
"SUCCESS",
"FAILED",
+ "CANCELLING",
"CANCELED"
]
},
@@ -2064,8 +2226,10 @@
"STARTED",
"TRANSFERRING",
"TRANSFERRED",
+ "FINALIZING",
"SUCCESS",
"FAILED",
+ "CANCELLING",
"CANCELED"
]
}
@@ -3345,7 +3509,7 @@
},
"description": "# Introduction
Weaviate is an open source, AI-native vector database that helps developers create intuitive and reliable AI-powered applications.
### Base Path
The base path for the Weaviate server is structured as `[YOUR-WEAVIATE-HOST]:[PORT]/v1`. As an example, if you wish to access the `schema` endpoint on a local instance, you would navigate to `http://localhost:8080/v1/schema`. Ensure you replace `[YOUR-WEAVIATE-HOST]` and `[PORT]` with your actual server host and port number respectively.
### Questions?
If you have any comments or questions, please feel free to reach out to us at the community forum [https://forum.weaviate.io/](https://forum.weaviate.io/).
### Issues?
If you find a bug or want to file a feature request, please open an issue on our GitHub repository for [Weaviate](https://github.com/weaviate/weaviate).
### Need more documentation?
For a quickstart, code examples, concepts and more, please visit our [documentation page](https://docs.weaviate.io/weaviate).",
"title": "Weaviate REST API",
- "version": "1.36.0-rc.0"
+ "version": "1.37.0-dev"
},
"parameters": {
"CommonAfterParameterQuery": {
@@ -9199,6 +9363,137 @@
}
}
},
+ "/export/{backend}": {
+ "post": {
+ "summary": "Start a new export",
+ "description": "Initiates an export operation that writes collections to Parquet files on the specified backend storage (S3, GCS, Azure, or filesystem). Each collection is exported to a separate Parquet file.",
+ "operationId": "export.create",
+ "x-serviceIds": [
+ "weaviate.local.export.create"
+ ],
+ "tags": [
+ "export"
+ ],
+ "parameters": [
+ {
+ "name": "backend",
+ "in": "path",
+ "required": true,
+ "type": "string",
+ "description": "The backend storage system to use for the export (e.g., `filesystem`, `gcs`, `s3`, `azure`)."
+ },
+ {
+ "name": "body",
+ "in": "body",
+ "required": true,
+ "schema": {
+ "$ref": "#/definitions/ExportCreateRequest"
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "Successfully started export operation",
+ "schema": {
+ "$ref": "#/definitions/ExportCreateResponse"
+ }
+ },
+ "401": {
+ "description": "Unauthorized or invalid credentials"
+ },
+ "403": {
+ "description": "Forbidden - insufficient permissions",
+ "schema": {
+ "$ref": "#/definitions/ErrorResponse"
+ }
+ },
+ "422": {
+ "description": "Invalid export request",
+ "schema": {
+ "$ref": "#/definitions/ErrorResponse"
+ }
+ },
+ "500": {
+ "description": "Internal server error occurred while starting export",
+ "schema": {
+ "$ref": "#/definitions/ErrorResponse"
+ }
+ }
+ }
+ }
+ },
+ "/export/{backend}/{id}": {
+ "get": {
+ "summary": "Get export status",
+ "description": "Retrieves the current status of an export operation, including progress for each collection being exported.",
+ "operationId": "export.status",
+ "x-serviceIds": [
+ "weaviate.local.export.status"
+ ],
+ "tags": [
+ "export"
+ ],
+ "parameters": [
+ {
+ "name": "backend",
+ "in": "path",
+ "required": true,
+ "type": "string",
+ "description": "The backend storage system where the export is stored."
+ },
+ {
+ "name": "id",
+ "in": "path",
+ "required": true,
+ "type": "string",
+ "description": "The unique identifier of the export."
+ },
+ {
+ "name": "bucket",
+ "in": "query",
+ "required": false,
+ "type": "string",
+ "description": "Optional bucket name where the export is stored. If not specified, uses the backend's default bucket."
+ },
+ {
+ "name": "path",
+ "in": "query",
+ "required": false,
+ "type": "string",
+ "description": "Optional path prefix within the bucket. If not specified, uses the backend's default path."
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "Successfully retrieved export status",
+ "schema": {
+ "$ref": "#/definitions/ExportStatusResponse"
+ }
+ },
+ "401": {
+ "description": "Unauthorized or invalid credentials"
+ },
+ "403": {
+ "description": "Forbidden - insufficient permissions",
+ "schema": {
+ "$ref": "#/definitions/ErrorResponse"
+ }
+ },
+ "404": {
+ "description": "Export not found",
+ "schema": {
+ "$ref": "#/definitions/ErrorResponse"
+ }
+ },
+ "500": {
+ "description": "Internal server error occurred while retrieving export status",
+ "schema": {
+ "$ref": "#/definitions/ErrorResponse"
+ }
+ }
+ }
+ }
+ },
"/cluster/statistics": {
"get": {
"summary": "Get cluster statistics",
@@ -9538,6 +9833,10 @@
"name": "backups",
"description": "Operations related to creating and managing backups of Weaviate data. This feature allows you to create snapshots of your collections and store them on external storage backends such as cloud object storage (S3, GCS, Azure) or a shared filesystem. These endpoints enable you to initiate backup and restore processes, monitor their status, list available backups on a backend, and delete unwanted backups. Backups are essential for disaster recovery, data migration, and maintaining point-in-time copies of your vector database."
},
+ {
+ "name": "exports",
+ "description": "Operations for exporting Weaviate data to Parquet format on external storage backends (S3, GCS, Azure, or filesystem). Exports provide a way to extract your vector data and object properties into a standardized columnar format for data analysis, archival, or migration. Each collection is exported to a separate Parquet file containing object IDs, vectors, properties, and metadata."
+ },
{
"name": "users",
"description": "Endpoints for user account management in Weaviate. This includes operations specific to Weaviate-managed database users (`db` users), such as creation (which generates an API key), listing, deletion, activation/deactivation, and API key rotation. It also provides operations applicable to any authenticated user (`db` or `oidc`), like retrieving their own information (username and assigned roles).
**User Types:**
* **`db` users:** Managed entirely within Weaviate (creation, deletion, API keys). Use these endpoints for full lifecycle management.
* **`oidc` users:** Authenticated via an external OpenID Connect provider. Their lifecycle (creation, credentials) is managed externally, but their role assignments *within Weaviate* are managed via the `authz` endpoints."
diff --git a/api/rest/schema.v3.yaml b/api/rest/schema.v3.yaml
index 3ff40a2a..3e549c54 100644
--- a/api/rest/schema.v3.yaml
+++ b/api/rest/schema.v3.yaml
@@ -181,6 +181,11 @@ components:
items:
type: string
type: array
+ incremental_base_backup_id:
+        default: null
+ description: The ID of an existing backup to use as the base for a file-based incremental backup. If set, only files that have changed since the base backup will be included in the new backup.
+ nullable: true
+ type: string
type: object
BackupCreateResponse:
description: The definition of a backup create response body
@@ -212,8 +217,10 @@ components:
- STARTED
- TRANSFERRING
- TRANSFERRED
+ - FINALIZING
- SUCCESS
- FAILED
+ - CANCELLING
- CANCELED
type: string
type: object
@@ -251,8 +258,10 @@ components:
- STARTED
- TRANSFERRING
- TRANSFERRED
+ - FINALIZING
- SUCCESS
- FAILED
+ - CANCELLING
- CANCELED
type: string
type: object
@@ -286,8 +295,10 @@ components:
- STARTED
- TRANSFERRING
- TRANSFERRED
+ - FINALIZING
- SUCCESS
- FAILED
+ - CANCELLING
- CANCELED
type: string
type: object
@@ -343,8 +354,10 @@ components:
- STARTED
- TRANSFERRING
- TRANSFERRED
+ - FINALIZING
- SUCCESS
- FAILED
+ - CANCELLING
- CANCELED
type: string
type: object
@@ -370,8 +383,10 @@ components:
- STARTED
- TRANSFERRING
- TRANSFERRED
+ - FINALIZING
- SUCCESS
- FAILED
+ - CANCELLING
- CANCELED
type: string
type: object
@@ -878,6 +893,106 @@ components:
type: object
type: array
type: object
+ ExportCreateRequest:
+ description: Request to create a new export operation
+ properties:
+ config:
+ description: Backend-specific configuration
+ properties:
+ bucket:
+ description: Bucket, container, or volume name for cloud storage backends
+ type: string
+ path:
+ description: Path prefix within the bucket or filesystem
+ type: string
+ type: object
+ exclude:
+ description: List of collection names to exclude from the export. Cannot be used with 'include'.
+ items:
+ type: string
+ type: array
+ id:
+ description: Unique identifier for this export. Must be URL-safe.
+ type: string
+ include:
+ description: List of collection names to include in the export. Cannot be used with 'exclude'.
+ items:
+ type: string
+ type: array
+ required:
+ - id
+ type: object
+ ExportCreateResponse:
+ description: Response from creating an export operation
+ properties:
+ backend:
+ description: The backend storage system used
+ type: string
+ classes:
+ description: List of collections being exported
+ items:
+ type: string
+ type: array
+ id:
+ description: Unique identifier for this export
+ type: string
+ path:
+ description: Full path where the export is being written
+ type: string
+ startedAt:
+ description: When the export started
+ format: date-time
+ type: string
+ status:
+ description: Current status of the export
+ enum:
+ - STARTED
+ type: string
+ type: object
+ ExportStatusResponse:
+ description: Current status of an export operation
+ properties:
+ backend:
+ description: The backend storage system used
+ type: string
+ classes:
+ description: List of collections in this export
+ items:
+ type: string
+ type: array
+ error:
+ description: Error message if export failed
+ type: string
+ id:
+ description: Unique identifier for this export
+ type: string
+ path:
+ description: Full path where the export is stored
+ type: string
+ shardStatus:
+ additionalProperties:
+ additionalProperties:
+ $ref: '#/components/schemas/ShardProgress'
+ type: object
+ description: 'Per-shard progress: className -> shardName -> status'
+ type: object
+ startedAt:
+ description: When the export started
+ format: date-time
+ type: string
+ status:
+ description: Current status of the export
+ enum:
+ - STARTED
+ - TRANSFERRING
+ - SUCCESS
+ - FAILED
+ type: string
+ tookInMs:
+ description: Duration of the export in milliseconds
+ format: int64
+ type: integer
+ type: object
GeoCoordinates:
properties:
latitude:
@@ -2184,6 +2299,25 @@ components:
SchemaHistory:
description: This is an open object, with OpenAPI Specification 3.0 this will be more detailed. See Weaviate docs for more info. In the future this will become a key/value OR a SingleRef definition.
type: object
+ ShardProgress:
+ description: Progress information for exporting a single shard
+ properties:
+ error:
+ description: Error message if this shard's export failed
+ type: string
+ objectsExported:
+ description: Number of objects exported from this shard
+ format: int64
+ type: integer
+ status:
+ description: Status of this shard's export
+ enum:
+ - STARTED
+ - TRANSFERRING
+ - SUCCESS
+ - FAILED
+ type: string
+ type: object
ShardStatus:
description: The status of a single shard
properties:
@@ -2552,7 +2686,7 @@ info:
url: https://github.com/weaviate
description: '# Introduction
Weaviate is an open source, AI-native vector database that helps developers create intuitive and reliable AI-powered applications.
### Base Path
The base path for the Weaviate server is structured as `[YOUR-WEAVIATE-HOST]:[PORT]/v1`. As an example, if you wish to access the `schema` endpoint on a local instance, you would navigate to `http://localhost:8080/v1/schema`. Ensure you replace `[YOUR-WEAVIATE-HOST]` and `[PORT]` with your actual server host and port number respectively.
### Questions?
If you have any comments or questions, please feel free to reach out to us at the community forum [https://forum.weaviate.io/](https://forum.weaviate.io/).
### Issues?
If you find a bug or want to file a feature request, please open an issue on our GitHub repository for [Weaviate](https://github.com/weaviate/weaviate).
### Need more documentation?
For a quickstart, code examples, concepts and more, please visit our [documentation page](https://docs.weaviate.io/weaviate).'
title: Weaviate REST API
- version: 1.36.0-rc.0
+ version: 1.37.0-dev
openapi: 3.0.1
paths:
/:
@@ -4728,6 +4862,120 @@ paths:
- cluster
x-serviceIds:
- weaviate.cluster.statistics.get
+ /export/{backend}:
+ post:
+ description: Initiates an export operation that writes collections to Parquet files on the specified backend storage (S3, GCS, Azure, or filesystem). Each collection is exported to a separate Parquet file.
+ operationId: export.create
+ parameters:
+ - description: The backend storage system to use for the export (e.g., `filesystem`, `gcs`, `s3`, `azure`).
+ in: path
+ name: backend
+ required: true
+ schema:
+ type: string
+ requestBody:
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/ExportCreateRequest'
+ application/yaml:
+ schema:
+ $ref: '#/components/schemas/ExportCreateRequest'
+ required: true
+ responses:
+ "200":
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/ExportCreateResponse'
+ description: Successfully started export operation
+ "401":
+ content: {}
+ description: Unauthorized or invalid credentials
+ "403":
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/ErrorResponse'
+ description: Forbidden - insufficient permissions
+ "422":
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/ErrorResponse'
+ description: Invalid export request
+ "500":
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/ErrorResponse'
+ description: Internal server error occurred while starting export
+ summary: Start a new export
+ tags:
+      - exports
+ x-codegen-request-body-name: body
+ x-serviceIds:
+ - weaviate.local.export.create
+ /export/{backend}/{id}:
+ get:
+ description: Retrieves the current status of an export operation, including progress for each collection being exported.
+ operationId: export.status
+ parameters:
+ - description: The backend storage system where the export is stored.
+ in: path
+ name: backend
+ required: true
+ schema:
+ type: string
+ - description: The unique identifier of the export.
+ in: path
+ name: id
+ required: true
+ schema:
+ type: string
+ - description: Optional bucket name where the export is stored. If not specified, uses the backend's default bucket.
+ in: query
+ name: bucket
+ schema:
+ type: string
+ - description: Optional path prefix within the bucket. If not specified, uses the backend's default path.
+ in: query
+ name: path
+ schema:
+ type: string
+ responses:
+ "200":
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/ExportStatusResponse'
+ description: Successfully retrieved export status
+ "401":
+ content: {}
+ description: Unauthorized or invalid credentials
+ "403":
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/ErrorResponse'
+ description: Forbidden - insufficient permissions
+ "404":
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/ErrorResponse'
+ description: Export not found
+ "500":
+ content:
+ application/json:
+ schema:
+ $ref: '#/components/schemas/ErrorResponse'
+ description: Internal server error occurred while retrieving export status
+ summary: Get export status
+ tags:
+      - exports
+ x-serviceIds:
+ - weaviate.local.export.status
/graphql:
post:
description: Executes a single GraphQL query provided in the request body. Use this endpoint for all Weaviate data queries and exploration.
@@ -8121,6 +8369,8 @@ tags:
name: schema
- description: Operations related to creating and managing backups of Weaviate data. This feature allows you to create snapshots of your collections and store them on external storage backends such as cloud object storage (S3, GCS, Azure) or a shared filesystem. These endpoints enable you to initiate backup and restore processes, monitor their status, list available backups on a backend, and delete unwanted backups. Backups are essential for disaster recovery, data migration, and maintaining point-in-time copies of your vector database.
name: backups
+ - description: Operations for exporting Weaviate data to Parquet format on external storage backends (S3, GCS, Azure, or filesystem). Exports provide a way to extract your vector data and object properties into a standardized columnar format for data analysis, archival, or migration. Each collection is exported to a separate Parquet file containing object IDs, vectors, properties, and metadata.
+ name: exports
- description: Endpoints for user account management in Weaviate. This includes operations specific to Weaviate-managed database users (`db` users), such as creation (which generates an API key), listing, deletion, activation/deactivation, and API key rotation. It also provides operations applicable to any authenticated user (`db` or `oidc`), like retrieving their own information (username and assigned roles).
**User Types:**
* **`db` users:** Managed entirely within Weaviate (creation, deletion, API keys). Use these endpoints for full lifecycle management.
* **`oidc` users:** Authenticated via an external OpenID Connect provider. Their lifecycle (creation, credentials) is managed externally, but their role assignments *within Weaviate* are managed via the `authz` endpoints.
name: users
- description: 'Endpoints for managing Weaviate''s Role-Based Access Control (RBAC) system. Access to Weaviate resources is granted through roles, which are collections of fine-grained permissions.
**Permissions:** Define allowed actions (e.g., `read_data`, `create_collections`, `delete_users`) on specific resources. Resources can be specified broadly (e.g., all collections: `*`) or narrowly (e.g., a specific collection name, tenant pattern, user name, or role name).
**Roles:** Are named sets of permissions. Managing roles involves creating roles with specific permissions, retrieving role definitions, deleting roles, and adding or removing permissions from existing roles.
**Role assignment:** Roles grant their contained permissions to users or groups. These endpoints allow assigning roles to:
* `db` users: Users managed directly by Weaviate via API or environment variables, authenticating with API keys.
* `oidc` users: Users authenticated via an external OpenID Connect provider, managed externally but assigned roles within Weaviate.
* OIDC `groups`: Users authenticated via OIDC who belong to a group automatically inherit roles assigned to that group.
Operations also include revoking roles, checking if a role has a specific permission, listing roles assigned to a user, and listing users/groups assigned to a role. The authorization framework applies universally to both `db` and `oidc` users based on their assigned roles.'
diff --git a/backup/client.go b/backup/client.go
new file mode 100644
index 00000000..087d1ce7
--- /dev/null
+++ b/backup/client.go
@@ -0,0 +1,261 @@
+package backup
+
+import (
+ "context"
+ "fmt"
+ "time"
+
+ "github.com/weaviate/weaviate-go-client/v6/internal"
+ "github.com/weaviate/weaviate-go-client/v6/internal/api"
+ "github.com/weaviate/weaviate-go-client/v6/internal/dev"
+)
+
+func NewClient(t internal.Transport) *Client {
+ dev.AssertNotNil(t, "transport")
+ return &Client{transport: t}
+}
+
+type Client struct {
+ transport internal.Transport
+}
+
+type Info struct {
+ Backend string // Backup storage backend
+ ID string // Backup ID
+ Path string // Path to backup in the backend storage
+ Error string // Backup creation / restoration error.
+ Status Status // Backup creation / restoration status.
+
+ StartedAt *time.Time // Time at which the backup creation started.
+ CompletedAt *time.Time // Time at which the backup was completed, successfully or otherwise.
+
+ // IncludesCollections is always empty for backups that are still being created.
+ // This field will be populated for restored backups and already-created backups
+ // returned from List, provided these backups included at least one collection.
+ IncludesCollections []string // Collections included in the backup
+
+ // Backup size in GiB. Similarly to IncludesCollections,
+ // this value only exists for completed backups.
+ SizeGiB *float32
+
+ c *Client
+ operation api.BackupOperation
+}
+
+// IsCompleted returns true if the backup operation has completed, successfully or otherwise.
+// All backups returned from List are completed by definition.
+func (i *Info) IsCompleted() bool {
+ return i.operation == completed ||
+ i.Status == StatusSuccess ||
+ i.Status == StatusFailed ||
+ i.Status == StatusCanceled
+}
+
+type (
+ CompressionLevel api.BackupCompressionLevel
+ Status api.BackupStatus
+ RBACRestore api.RBACRestoreOption
+)
+
+const (
+ CompressionLevelDefault CompressionLevel = CompressionLevel(api.BackupCompressionLevelDefault)
+ CompressionLevelBestSpeed CompressionLevel = CompressionLevel(api.BackupCompressionLevelBestSpeed)
+ CompressionLevelBestCompression CompressionLevel = CompressionLevel(api.BackupCompressionLevelBestCompression)
+ CompressionLevelZstdDefault CompressionLevel = CompressionLevel(api.BackupCompressionLevelZstdDefault)
+ CompressionLevelZstdBestSpeed CompressionLevel = CompressionLevel(api.BackupCompressionLevelZstdBestSpeed)
+ CompressionLevelZstdBestCompression CompressionLevel = CompressionLevel(api.BackupCompressionLevelZstdBestCompression)
+ CompressionLevelNone CompressionLevel = CompressionLevel(api.BackupCompressionLevelNone)
+
+ StatusStarted Status = Status(api.BackupStatusStarted)
+ StatusTransferring Status = Status(api.BackupStatusTransferring)
+ StatusTransferred Status = Status(api.BackupStatusTransferred)
+ StatusFinalizing Status = Status(api.BackupStatusFinalizing)
+ StatusCanceling Status = Status(api.BackupStatusCanceling)
+ StatusCanceled Status = Status(api.BackupStatusCanceled)
+ StatusSuccess Status = Status(api.BackupStatusSuccess)
+ StatusFailed Status = Status(api.BackupStatusFailed)
+
+ RBACRestoreAll RBACRestore = RBACRestore(api.RBACRestoreAll)
+ RBACRestoreNone RBACRestore = RBACRestore(api.RBACRestoreNone)
+)
+
+type Create struct {
+ Backend string // Required: backend storage.
+ ID string // Required: backup ID.
+ Path string // Path to backup in the backend storage.
+ Endpoint string // Name of the endpoint, e.g. s3.amazonaws.com
+ Bucket string // Dedicated bucket name.
+ IncludeCollections []string // Collections to be included in the backup.
+ ExcludeCollections []string // Collections to be excluded from the backup.
+ PrefixIncremental string // Backup ID prefix. Setting it enables incremental backups.
+ MaxCPUPercentage int // Maximum %CPU utilization.
+ ChunkSizeMiB int // Target chunk size in MiB.
+ CompressionLevel CompressionLevel // Hint for selecting the optimal compression algorithm.
+}
+
+// Create starts a new backup creation operation on the given backend.
+func (c *Client) Create(ctx context.Context, options Create) (*Info, error) {
+ req := &api.CreateBackupRequest{
+ Backend: options.Backend,
+ ID: options.ID,
+ Bucket: options.Bucket,
+ BackupPath: options.Path,
+ Endpoint: options.Endpoint,
+ IncludeCollections: options.IncludeCollections,
+ ExcludeCollections: options.ExcludeCollections,
+ PrefixIncremental: options.PrefixIncremental,
+ MaxCPUPercentage: options.MaxCPUPercentage,
+ ChunkSizeMiB: options.ChunkSizeMiB,
+ CompressionLevel: api.BackupCompressionLevel(options.CompressionLevel),
+ }
+
+ var resp api.BackupInfo
+ if err := c.transport.Do(ctx, req, &resp); err != nil {
+ return nil, fmt.Errorf("create backup: %w", err)
+ }
+
+ info := infoFromAPI(&resp, c, api.BackupOperationCreate)
+ return &info, nil
+}
+
+type Restore struct {
+ Backend string // Required: backend storage.
+ ID string // Required: backup ID.
+ Path string // Path to backup in the backend storage.
+ Endpoint string // Name of the endpoint, e.g. s3.amazonaws.com
+ Bucket string // Dedicated bucket name.
+ IncludeCollections []string // Collections to be included in the backup.
+ ExcludeCollections []string // Collections to be excluded from the backup.
+ MaxCPUPercentage int // Maximum %CPU utilization.
+ OverwriteAlias bool // Allow overwriting aliases.
+ RestoreUsers RBACRestore // Select strategy for restoring RBAC users.
+ RestoreRoles RBACRestore // Select strategy for restoring RBAC roles.
+ NodeMapping map[string]string // Remap node names stored in the backup.
+}
+
+func (c *Client) Restore(ctx context.Context, options Restore) (*Info, error) {
+ req := &api.RestoreBackupRequest{
+ Backend: options.Backend,
+ ID: options.ID,
+ Bucket: options.Bucket,
+ BackupPath: options.Path,
+ Endpoint: options.Endpoint,
+ IncludeCollections: options.IncludeCollections,
+ ExcludeCollections: options.ExcludeCollections,
+ MaxCPUPercentage: options.MaxCPUPercentage,
+ OverwriteAlias: options.OverwriteAlias,
+ RestoreUsers: api.RBACRestoreOption(options.RestoreUsers),
+ RestoreRoles: api.RBACRestoreOption(options.RestoreRoles),
+ NodeMapping: options.NodeMapping,
+ }
+
+ var resp api.BackupInfo
+ if err := c.transport.Do(ctx, req, &resp); err != nil {
+ return nil, fmt.Errorf("restore backup: %w", err)
+ }
+
+ info := infoFromAPI(&resp, c, api.BackupOperationRestore)
+ return &info, nil
+}
+
+type GetStatus struct {
+ Backend string // Required: Backend storage.
+ ID string // Required: Backup ID.
+}
+
+func (c *Client) GetCreateStatus(ctx context.Context, options GetStatus) (*Info, error) {
+ return c.getStatus(ctx, options, api.BackupOperationCreate)
+}
+
+func (c *Client) GetRestoreStatus(ctx context.Context, options GetStatus) (*Info, error) {
+ return c.getStatus(ctx, options, api.BackupOperationRestore)
+}
+
+func (c *Client) getStatus(ctx context.Context, options GetStatus, operation api.BackupOperation) (*Info, error) {
+ req := &api.BackupStatusRequest{
+ Backend: options.Backend,
+ ID: options.ID,
+ Operation: operation,
+ }
+
+ var resp api.BackupInfo
+ if err := c.transport.Do(ctx, req, &resp); err != nil {
+ return nil, fmt.Errorf("get backup status: %w", err)
+ }
+
+ info := infoFromAPI(&resp, c, operation)
+ return &info, nil
+}
+
+type List struct {
+ Backend string // Required: Backend storage.
+ StartingTimeAsc bool // Set to true to order backups by their StartedAt time in ascending order.
+}
+
+func (c *Client) List(ctx context.Context, options List) ([]Info, error) {
+ req := &api.ListBackupsRequest{
+ Backend: options.Backend,
+ StartingTimeAsc: options.StartingTimeAsc,
+ }
+
+ var resp []api.BackupInfo
+ if err := c.transport.Do(ctx, req, &resp); err != nil {
+ return nil, fmt.Errorf("list backups: %w", err)
+ }
+
+ infos := make([]Info, len(resp))
+ for i, bak := range resp {
+ infos[i] = infoFromAPI(&bak, c, completed)
+ }
+ return infos, nil
+}
+
+type Cancel struct {
+ Backend string // Required: Backend storage.
+ ID string // Required: Backup ID.
+}
+
+// CancelCreate cancels an in-progress backup creation.
+func (c *Client) CancelCreate(ctx context.Context, options Cancel) error {
+ return c.cancel(ctx, options, api.BackupOperationCreate)
+}
+
+// CancelRestore cancels an in-progress backup restoration.
+func (c *Client) CancelRestore(ctx context.Context, options Cancel) error {
+ return c.cancel(ctx, options, api.BackupOperationRestore)
+}
+
+func (c *Client) cancel(ctx context.Context, options Cancel, op api.BackupOperation) error {
+ req := api.CancelBackupRequest{
+ Backend: options.Backend,
+ ID: options.ID,
+ Operation: op,
+ }
+ if err := c.transport.Do(ctx, &req, nil); err != nil {
+ return fmt.Errorf("cancel backup: %w", err)
+ }
+ return nil
+}
+
+// completed is a special flag for operations returned from List.
+// Those operations technically have api.CreateBackup origin, but
+// Info.operation is used to await backup completion (or any other status),
+// and, since they are already completed, we can give awaiters a hint.
+const completed api.BackupOperation = api.BackupOperation(api.BackupOperationCreate - 1)
+
+func infoFromAPI(bak *api.BackupInfo, c *Client, op api.BackupOperation) Info {
+ return Info{
+ ID: bak.ID,
+ Path: bak.Path,
+ Backend: bak.Backend,
+ Status: Status(bak.Status),
+ Error: bak.Error,
+ StartedAt: bak.StartedAt,
+ CompletedAt: bak.CompletedAt,
+ IncludesCollections: bak.IncludesCollections,
+ SizeGiB: bak.SizeGiB,
+
+ operation: op,
+ c: c,
+ }
+}
diff --git a/backup/client_test.go b/backup/client_test.go
new file mode 100644
index 00000000..ace25fba
--- /dev/null
+++ b/backup/client_test.go
@@ -0,0 +1,299 @@
+package backup_test
+
+import (
+ "context"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+ "github.com/weaviate/weaviate-go-client/v6/backup"
+ "github.com/weaviate/weaviate-go-client/v6/internal/api"
+ "github.com/weaviate/weaviate-go-client/v6/internal/testkit"
+)
+
+func TestNewClient(t *testing.T) {
+ require.Panics(t, func() {
+ backup.NewClient(nil)
+ }, "nil transport")
+}
+
+func TestClient_Create(t *testing.T) {
+ for _, tt := range []struct {
+ name string
+ create backup.Create
+ stubs []testkit.Stub[api.CreateBackupRequest, api.BackupInfo]
+ want *backup.Info // Expected return value.
+ err testkit.Error // Expected error.
+ }{
+ {
+ name: "successfully",
+ create: backup.Create{
+ Backend: "filesystem",
+ ID: "bak-1",
+ Path: "/path/to/backup",
+ Endpoint: "s3.amazonaws.com",
+ Bucket: "my-backups",
+ IncludeCollections: []string{"Songs"},
+ ExcludeCollections: []string{"Pizza"},
+ PrefixIncremental: "incr-bak-",
+ MaxCPUPercentage: 92,
+ ChunkSizeMiB: 20,
+ CompressionLevel: backup.CompressionLevelDefault,
+ },
+ stubs: []testkit.Stub[api.CreateBackupRequest, api.BackupInfo]{
+ {
+ Request: &api.CreateBackupRequest{
+ Backend: "filesystem",
+ ID: "bak-1",
+ BackupPath: "/path/to/backup",
+ Endpoint: "s3.amazonaws.com",
+ Bucket: "my-backups",
+ IncludeCollections: []string{"Songs"},
+ ExcludeCollections: []string{"Pizza"},
+ PrefixIncremental: "incr-bak-",
+ MaxCPUPercentage: 92,
+ ChunkSizeMiB: 20,
+ CompressionLevel: api.BackupCompressionLevelDefault,
+ },
+ Response: api.BackupInfo{
+ Backend: "filesystem",
+ ID: "bak-1",
+ Path: "/path/to/backup",
+ Status: api.BackupStatusStarted,
+ IncludesCollections: []string{"Songs"},
+ },
+ },
+ },
+ want: &backup.Info{
+ Backend: "filesystem",
+ ID: "bak-1",
+ Path: "/path/to/backup",
+ Status: backup.StatusStarted,
+ IncludesCollections: []string{"Songs"},
+ },
+ },
+ {
+ name: "with error",
+ stubs: []testkit.Stub[api.CreateBackupRequest, api.BackupInfo]{
+ {Err: testkit.ErrWhaam},
+ },
+ err: testkit.ExpectError,
+ },
+ } {
+ t.Run(tt.name, func(t *testing.T) {
+ transport := testkit.NewTransport(t, tt.stubs)
+ c := backup.NewClient(transport)
+ require.NotNil(t, c, "nil client")
+
+ got, err := c.Create(t.Context(), tt.create)
+ tt.err.Require(t, err, "create error")
+ require.EqualExportedValues(t, tt.want, got, "returned info")
+ })
+ }
+}
+
+func TestClient_Restore(t *testing.T) {
+ for _, tt := range []struct {
+ name string
+ restore backup.Restore
+ stubs []testkit.Stub[api.RestoreBackupRequest, api.BackupInfo]
+ want *backup.Info // Expected return value.
+ err testkit.Error // Expected error.
+ }{
+ {
+ name: "successfully",
+ restore: backup.Restore{
+ Backend: "filesystem",
+ ID: "bak-1",
+ Path: "/path/to/backup",
+ Endpoint: "s3.amazonaws.com",
+ Bucket: "my-backups",
+ IncludeCollections: []string{"Songs"},
+ ExcludeCollections: []string{"Pizza"},
+ MaxCPUPercentage: 92,
+ OverwriteAlias: true,
+ RestoreUsers: backup.RBACRestoreAll,
+ RestoreRoles: backup.RBACRestoreNone,
+ NodeMapping: map[string]string{"node-1": "node-a"},
+ },
+ stubs: []testkit.Stub[api.RestoreBackupRequest, api.BackupInfo]{
+ {
+ Request: &api.RestoreBackupRequest{
+ Backend: "filesystem",
+ ID: "bak-1",
+ BackupPath: "/path/to/backup",
+ Endpoint: "s3.amazonaws.com",
+ Bucket: "my-backups",
+ IncludeCollections: []string{"Songs"},
+ ExcludeCollections: []string{"Pizza"},
+ MaxCPUPercentage: 92,
+ OverwriteAlias: true,
+ RestoreUsers: api.RBACRestoreAll,
+ RestoreRoles: api.RBACRestoreNone,
+ NodeMapping: map[string]string{"node-1": "node-a"},
+ },
+ Response: api.BackupInfo{
+ Backend: "filesystem",
+ ID: "bak-1",
+ Path: "/path/to/backup",
+ Status: api.BackupStatusSuccess,
+ IncludesCollections: []string{"Songs"},
+ SizeGiB: testkit.Ptr[float32](.6),
+ },
+ },
+ },
+ want: &backup.Info{
+ Backend: "filesystem",
+ ID: "bak-1",
+ Path: "/path/to/backup",
+ Status: backup.StatusSuccess,
+ IncludesCollections: []string{"Songs"},
+ SizeGiB: testkit.Ptr[float32](.6),
+ },
+ },
+ {
+ name: "with error",
+ stubs: []testkit.Stub[api.RestoreBackupRequest, api.BackupInfo]{
+ {Err: testkit.ErrWhaam},
+ },
+ err: testkit.ExpectError,
+ },
+ } {
+ t.Run(tt.name, func(t *testing.T) {
+ transport := testkit.NewTransport(t, tt.stubs)
+ c := backup.NewClient(transport)
+ require.NotNil(t, c, "nil client")
+
+ got, err := c.Restore(t.Context(), tt.restore)
+ tt.err.Require(t, err, "restore error")
+ require.EqualExportedValues(t, tt.want, got, "returned info")
+ })
+ }
+}
+
+func TestClient_List(t *testing.T) {
+ for _, tt := range []struct {
+ name string
+ list backup.List
+ stubs []testkit.Stub[api.ListBackupsRequest, []api.BackupInfo]
+ want []backup.Info // Expected return value.
+ err testkit.Error // Expected error.
+ }{
+ {
+ name: "successfully",
+ list: backup.List{
+ Backend: "filesystem",
+ StartingTimeAsc: true,
+ },
+ stubs: []testkit.Stub[api.ListBackupsRequest, []api.BackupInfo]{
+ {
+ Request: &api.ListBackupsRequest{
+ Backend: "filesystem",
+ StartingTimeAsc: true,
+ },
+ Response: []api.BackupInfo{
+ {ID: "bak-1"},
+ {ID: "bak-2"},
+ {ID: "bak-3"},
+ },
+ },
+ },
+ want: []backup.Info{
+ {ID: "bak-1"},
+ {ID: "bak-2"},
+ {ID: "bak-3"},
+ },
+ },
+ {
+ name: "with error",
+ stubs: []testkit.Stub[api.ListBackupsRequest, []api.BackupInfo]{
+ {Err: testkit.ErrWhaam},
+ },
+ err: testkit.ExpectError,
+ },
+ } {
+ t.Run(tt.name, func(t *testing.T) {
+ transport := testkit.NewTransport(t, tt.stubs)
+ c := backup.NewClient(transport)
+ require.NotNil(t, c, "nil client")
+
+ got, err := c.List(t.Context(), tt.list)
+ tt.err.Require(t, err, "list error")
+ require.EqualExportedValues(t, tt.want, got, "returned info")
+ })
+ }
+}
+
+func TestClient_Cancel(t *testing.T) {
+ for _, tt := range []struct {
+ name string
+ cancel func(context.Context, *backup.Client) error // Call appropriate cancel function.
+ stubs []testkit.Stub[api.CancelBackupRequest, any]
+ err testkit.Error // Expected error.
+ }{
+ {
+ name: "cancel create successfully",
+ cancel: func(ctx context.Context, c *backup.Client) error {
+ return c.CancelCreate(ctx, backup.Cancel{
+ Backend: "filesystem",
+ ID: "bak-1",
+ })
+ },
+ stubs: []testkit.Stub[api.CancelBackupRequest, any]{
+ {
+ Request: &api.CancelBackupRequest{
+ Backend: "filesystem",
+ ID: "bak-1",
+ Operation: api.BackupOperationCreate,
+ },
+ },
+ },
+ },
+ {
+ name: "cancel create with error",
+ cancel: func(ctx context.Context, c *backup.Client) error {
+ return c.CancelCreate(ctx, backup.Cancel{})
+ },
+ stubs: []testkit.Stub[api.CancelBackupRequest, any]{
+ {Err: testkit.ErrWhaam},
+ },
+ err: testkit.ExpectError,
+ },
+ {
+ name: "cancel restore successfully",
+ cancel: func(ctx context.Context, c *backup.Client) error {
+ return c.CancelRestore(ctx, backup.Cancel{
+ Backend: "filesystem",
+ ID: "bak-1",
+ })
+ },
+ stubs: []testkit.Stub[api.CancelBackupRequest, any]{
+ {
+ Request: &api.CancelBackupRequest{
+ Backend: "filesystem",
+ ID: "bak-1",
+ Operation: api.BackupOperationRestore,
+ },
+ },
+ },
+ },
+ {
+ name: "cancel restore with error",
+ cancel: func(ctx context.Context, c *backup.Client) error {
+ return c.CancelRestore(ctx, backup.Cancel{})
+ },
+ stubs: []testkit.Stub[api.CancelBackupRequest, any]{
+ {Err: testkit.ErrWhaam},
+ },
+ err: testkit.ExpectError,
+ },
+ } {
+ t.Run(tt.name, func(t *testing.T) {
+ transport := testkit.NewTransport(t, tt.stubs)
+ c := backup.NewClient(transport)
+ require.NotNil(t, c, "nil client")
+
+ err := tt.cancel(t.Context(), c)
+ tt.err.Require(t, err, "cancel error")
+ })
+ }
+}
diff --git a/backup/wait.go b/backup/wait.go
new file mode 100644
index 00000000..a874c803
--- /dev/null
+++ b/backup/wait.go
@@ -0,0 +1,127 @@
+package backup
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "time"
+
+ "github.com/weaviate/weaviate-go-client/v6/internal/dev"
+)
+
+// pollingInterval is currently the only configurable AwaitOption.
+// We keep the type unexported to be able to extend it later.
+type pollingInterval time.Duration
+
+const (
+ defaultTimeout = 1 * time.Hour
+ defaultPollingInterval pollingInterval = pollingInterval(1 * time.Second)
+)
+
+var (
+ // To be awaited, backups need to have a valid [Info.operation] field
+ // and a non-nil *Client in [Info.c]. Client correctly populates these
+ // fields for Info returned from Create, Restore, GetCreateStatus, and GetRestoreStatus.
+ errBackupNotAwaitable = errors.New("only backups returned from Create / Restore and GetStatus are awaitable")
+
+ // Backup completed without reaching desired state. For example, a backup might
+ // have been canceled while awaiting StatusSuccess, in which case it will never
+ // reach state. The next status check should recognize this and return errBackupStatusFallthrough.
+ errBackupStatusFallthrough = errors.New("backup completed without reaching desired status")
+)
+
+// AwaitOption controls backup status polling.
+type AwaitOption func(*pollingInterval)
+
+// WithPollingInterval sets custom polling interval for checking backup status.
+// Use [context.WithDeadline] to set await deadline.
+func WithPollingInterval(d time.Duration) AwaitOption {
+ return func(pi *pollingInterval) { *pi = pollingInterval(d) }
+}
+
+// AwaitCompletion is an AwaitStatus wrapper that awaits [StatusSuccess].
+func AwaitCompletion(ctx context.Context, backup *Info, options ...AwaitOption) (*Info, error) {
+ return AwaitStatus(ctx, backup, StatusSuccess, options...)
+}
+
+// AwaitStatus blocks until backup reaches the desired state or otherwise completes.
+//
+// By default, AwaitStatus will poll backup status once per second and time out after 1 hour.
+// The inverval can be adjusted via [WithPollingInterval]. Use [context.WithDeadline] to set a different deadline.
+//
+// AwaitStatus SHOULD only be called with [Info] obtained from either Create / Restore,
+// or GetCreateStatus / GetRestoreStatus, as these will correcly populate the struct's private fields.
+//
+// Example:
+//
+// // GOOD:
+// bak, _ := c.Backup.Create(ctx, "bak-1", "filesystem")
+// backup.AwaitStatus(ctx, bak, backup.StatusCanceled)
+//
+// // ALSO GOOD:
+// bak, _ := c.Backup.GetCreateStatus(ctx, "bak-1", "filesystem")
+// backup.AwaitStatus(ctx, bak, backup.StatusCanceled)
+//
+// // BAD:
+// backup.AwaitStatus(ctx, &backup.Info{ID: "bak-1"}, backup.StatusCanceled)
+//
+// AwaitStatus returns immediately if the *Info is nil, the backup's completed,
+// or reached the desired status. In the first 2 cases the returned error is not nil.
+func AwaitStatus(ctx context.Context, bak *Info, want Status, options ...AwaitOption) (*Info, error) {
+ if bak == nil {
+ return nil, fmt.Errorf("nil backup")
+ } else if bak.Status == want {
+ return bak, nil
+ } else if bak.IsCompleted() {
+ return bak, fmt.Errorf("await %s: %w", want, errBackupStatusFallthrough)
+ }
+
+ c := bak.c
+ if c == nil {
+ return nil, errBackupNotAwaitable
+ }
+
+ interval := defaultPollingInterval
+ for _, opt := range options {
+ opt(&interval)
+ }
+
+ if _, ok := ctx.Deadline(); !ok {
+ var cancel context.CancelFunc
+ ctx, cancel = context.WithDeadline(ctx, time.Now().Add(defaultTimeout))
+ defer cancel()
+ }
+
+ _, hasDeadline := ctx.Deadline()
+ dev.Assert(hasDeadline, "unbounded await context")
+
+ cfg := GetStatus{Backend: bak.Backend, ID: bak.ID}
+ current := bak
+ for {
+ select {
+ case <-ctx.Done():
+ return current, ctx.Err()
+ default:
+ latest, err := c.getStatus(ctx, cfg, bak.operation)
+ if err != nil {
+ return current, err
+ }
+ current = latest
+
+ dev.AssertNotNil(latest, "getStatus returned nil backup.Info")
+
+ if latest.Status == want {
+ return latest, nil
+ } else if latest.IsCompleted() {
+ return latest, fmt.Errorf("await %s: %w", want, errBackupStatusFallthrough)
+ }
+
+			// Sleep until the next polling interval, respecting context cancellation.
+ select {
+ case <-time.After(time.Duration(interval)):
+ case <-ctx.Done():
+ return latest, ctx.Err()
+ }
+ }
+ }
+}
diff --git a/backup/wait_test.go b/backup/wait_test.go
new file mode 100644
index 00000000..fb52bf7a
--- /dev/null
+++ b/backup/wait_test.go
@@ -0,0 +1,195 @@
+package backup_test
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ "github.com/weaviate/weaviate-go-client/v6/backup"
+ "github.com/weaviate/weaviate-go-client/v6/internal/api"
+ "github.com/weaviate/weaviate-go-client/v6/internal/testkit"
+)
+
+func TestAwaitStatus(t *testing.T) {
+ t.Run("nil backup", func(t *testing.T) {
+ got, err := backup.AwaitStatus(t.Context(), nil, backup.StatusSuccess)
+ assert.Nil(t, got, "must return nil *backup.Info")
+ assert.Error(t, err)
+ })
+
+ t.Run("invalid backup (must have *backup.Client reference)", func(t *testing.T) {
+ bak := &backup.Info{ID: "backup-1", Status: backup.StatusTransferring}
+
+ got, err := backup.AwaitStatus(t.Context(), bak, backup.StatusSuccess)
+
+ assert.Nil(t, got, "must return nil *backup.Info")
+ assert.Error(t, err)
+ })
+
+	// The cases below describe valid backups. Each case defines the responses
+ // that AwaitStatus is expected to consume. Cases with nil/empty stubs
+ // expect AwaitStatus to return without making any more requests.
+ for _, tt := range []struct {
+ name string
+ initStatus backup.Status // Initial backup status, Started by default.
+ stubs []testkit.Stub[any, api.BackupInfo] // Responses for AwaitStatus.
+ awaitStatus backup.Status // Passed to AwaitStatus.
+ wantStatus backup.Status // Latest observed status.
+ ctx context.Context // Using t.Context() if nil.
+ err testkit.Error // Set to testkit.ExpectError or testkit.ErrorIs
+ }{
+ {
+ name: "backup in desired state",
+ initStatus: backup.StatusTransferring,
+ awaitStatus: backup.StatusTransferring,
+ wantStatus: backup.StatusTransferring,
+ },
+ {
+ name: "backup is already completed (status fallthrough)",
+ initStatus: backup.StatusSuccess,
+ awaitStatus: backup.StatusTransferring,
+ wantStatus: backup.StatusSuccess,
+ err: testkit.ExpectError,
+ },
+ {
+ name: "successful await",
+ stubs: []testkit.Stub[any, api.BackupInfo]{
+ {Response: api.BackupInfo{Status: api.BackupStatusTransferring}},
+ {Response: api.BackupInfo{Status: api.BackupStatusTransferring}},
+ {Response: api.BackupInfo{Status: api.BackupStatusTransferred}},
+ },
+ awaitStatus: backup.StatusTransferred,
+ wantStatus: backup.StatusTransferred,
+ },
+ {
+ name: "backup is canceled abruptly (status fallthrough)",
+ stubs: []testkit.Stub[any, api.BackupInfo]{
+ {Response: api.BackupInfo{Status: api.BackupStatusTransferring}},
+ {Response: api.BackupInfo{Status: api.BackupStatusTransferring}},
+ {Response: api.BackupInfo{Status: api.BackupStatusCanceled}},
+ },
+ awaitStatus: backup.StatusTransferred,
+ wantStatus: backup.StatusCanceled,
+ err: testkit.ExpectError,
+ },
+ {
+ name: "error while awaiting",
+ stubs: []testkit.Stub[any, api.BackupInfo]{
+ {Response: api.BackupInfo{Status: api.BackupStatusTransferring}},
+ {Err: errors.New("whaam!")},
+ },
+ awaitStatus: backup.StatusSuccess,
+ wantStatus: backup.StatusTransferring,
+ err: testkit.ExpectError,
+ },
+ {
+ name: "context is canceled",
+ ctx: ctxCanceled(),
+ awaitStatus: backup.StatusSuccess,
+ wantStatus: backup.StatusStarted,
+ err: testkit.ErrorIs(context.Canceled),
+ },
+ {
+ name: "context deadline exceeded",
+ stubs: []testkit.Stub[any, api.BackupInfo]{
+ {Response: api.BackupInfo{Status: api.BackupStatusStarted}},
+ },
+ ctx: ctxDoneOnSecondCheck(),
+ awaitStatus: backup.StatusSuccess,
+ wantStatus: backup.StatusStarted,
+ err: testkit.ErrorIs(context.DeadlineExceeded),
+ },
+ } {
+ t.Run(tt.name, func(t *testing.T) {
+ // Arrange
+ ctx := t.Context()
+ if tt.ctx != nil {
+ ctx = tt.ctx
+ }
+
+ initStatus := api.BackupStatusStarted
+ if tt.initStatus != "" {
+ initStatus = api.BackupStatus(tt.initStatus)
+ }
+
+			// The first response is always consumed by the test setup's GetCreateStatus call.
+ transport := testkit.NewTransport(t, append([]testkit.Stub[any, api.BackupInfo]{
+ {Response: api.BackupInfo{Status: initStatus}},
+ }, tt.stubs...))
+ c := backup.NewClient(transport)
+ require.NotNil(t, c, "nil client")
+
+ // GetCreateStatus is part of test setup, always called with t.Context()
+ bak, err := c.GetCreateStatus(t.Context(), backup.GetStatus{})
+ require.NoError(t, err)
+ require.NotNil(t, bak, "nil backup from get-status")
+
+ // Act
+ got, err := backup.AwaitStatus(
+ ctx, bak, tt.awaitStatus,
+ backup.WithPollingInterval(0),
+ )
+
+ // Assert
+ tt.err.Require(t, err, "await status error")
+ assert.NotNil(t, got, "must return latest backup status")
+ assert.Equal(t, tt.wantStatus, got.Status, "latest status")
+ })
+ }
+}
+
+// ctxCanceled returns a canceled context.
+func ctxCanceled() context.Context {
+ ctx, cancel := context.WithCancel(context.Background())
+ defer cancel()
+ return ctx
+}
+
+// ctxDoneOnSecondCheck returns a context that is Done
+// after its Done method has been called twice.
+//
+// While this breaches the boundaries of black-box testing, it
+// lets us reach a case in AwaitStatus where the deadline
+// expires while the goroutine is asleep without relying
+// on the real clock.
+func ctxDoneOnSecondCheck() context.Context {
+ return testkit.NewTickingContext(2)
+}
+
+func TestInfo_IsCompleted(t *testing.T) {
+ for _, tt := range []struct {
+ bak backup.Info
+ want bool
+ }{
+ {bak: backup.Info{Status: backup.StatusStarted}, want: false},
+ {bak: backup.Info{Status: backup.StatusTransferring}, want: false},
+ {bak: backup.Info{Status: backup.StatusTransferred}, want: false},
+ {bak: backup.Info{Status: backup.StatusSuccess}, want: true},
+ {bak: backup.Info{Status: backup.StatusFailed}, want: true},
+ {bak: backup.Info{Status: backup.StatusCanceled}, want: true},
+ } {
+ t.Run(fmt.Sprintf("status=%s", tt.bak.Status), func(t *testing.T) {
+ require.Equal(t, tt.want, tt.bak.IsCompleted())
+ })
+ }
+
+ t.Run("listed backups", func(t *testing.T) {
+ transport := testkit.NewTransport(t, []testkit.Stub[any, []api.BackupInfo]{
+ {Response: []api.BackupInfo{
+ {ID: "1"}, {ID: "2"}, {ID: "3"},
+ }},
+ })
+
+ c := backup.NewClient(transport)
+ require.NotNil(t, c, "nil backup client")
+
+ all, err := c.List(t.Context(), backup.List{})
+ assert.NoError(t, err)
+ for _, bak := range all {
+ assert.True(t, bak.IsCompleted(), "bak-%s: List must return completed backups", bak.ID)
+ }
+ })
+}
diff --git a/client.go b/client.go
index b45f5459..1496aa26 100644
--- a/client.go
+++ b/client.go
@@ -8,11 +8,13 @@ import (
"strings"
"time"
+ "github.com/weaviate/weaviate-go-client/v6/backup"
"github.com/weaviate/weaviate-go-client/v6/collections"
"github.com/weaviate/weaviate-go-client/v6/internal/api/transport"
)
type Client struct {
+ Backup *backup.Client
Collections *collections.Client
}
@@ -117,6 +119,7 @@ func newClient(_ context.Context, options []Option) (*Client, error) {
return &Client{
Collections: collections.NewClient(t),
+ Backup: backup.NewClient(t),
}, nil
}
diff --git a/client_test.go b/client_test.go
index 7eb87475..17b0edde 100644
--- a/client_test.go
+++ b/client_test.go
@@ -25,8 +25,8 @@ func TestNewLocal(t *testing.T) {
}
c, err := weaviate.NewLocal(t.Context())
- assert.NotNil(t, c, "nil client")
assert.NoError(t, err)
+ assert.NotNil(t, c, "nil client")
assert.Equal(t, transport.Config{
Scheme: "http",
@@ -61,8 +61,8 @@ func TestNewLocal(t *testing.T) {
weaviate.WithReadTimeout(20*time.Second),
weaviate.WithBatchTimeout(100*time.Millisecond),
)
- assert.NotNil(t, c, "nil client")
assert.NoError(t, err)
+ assert.NotNil(t, c, "nil client")
assert.Equal(t, transport.Config{
// Defaults
@@ -99,8 +99,8 @@ func TestNewWeaviateCloud(t *testing.T) {
}
c, err := weaviate.NewWeaviateCloud(t.Context(), "example.com", "api-key")
- assert.NotNil(t, c, "nil client")
assert.NoError(t, err)
+ assert.NotNil(t, c, "nil client")
assert.Equal(t, transport.Config{
Scheme: "https",
@@ -132,8 +132,8 @@ func TestNewWeaviateCloud(t *testing.T) {
}
c, err := weaviate.NewWeaviateCloud(t.Context(), "my."+domain, "api-key")
- assert.NotNil(t, c, "nil client")
assert.NoError(t, err)
+ assert.NotNil(t, c, "nil client")
assert.Equal(t, got.Header, http.Header{
"X-Weaviate-Client": {"weaviate-client-go" + "/" + weaviate.Version()},
@@ -159,8 +159,8 @@ func TestNewWeaviateCloud(t *testing.T) {
weaviate.WithReadTimeout(20*time.Second),
weaviate.WithBatchTimeout(100*time.Millisecond),
)
- assert.NotNil(t, c, "nil client")
assert.NoError(t, err)
+ assert.NotNil(t, c, "nil client")
assert.Equal(t, transport.Config{
Scheme: "https",
@@ -179,4 +179,17 @@ func TestNewWeaviateCloud(t *testing.T) {
},
}, got)
})
+
+ t.Run("namespaces", func(t *testing.T) {
+ transport.New = func(cfg transport.Config) (internal.Transport, error) {
+ return testkit.NopTransport, nil
+ }
+
+ c, err := weaviate.NewClient(t.Context())
+ assert.NoError(t, err)
+ assert.NotNil(t, c, "nil client")
+
+ assert.NotNil(t, c.Collections, "nil collections")
+ assert.NotNil(t, c.Backup, "nil backup")
+ })
}
diff --git a/collections/client.go b/collections/client.go
index 7beeae46..f22d37cc 100644
--- a/collections/client.go
+++ b/collections/client.go
@@ -13,7 +13,7 @@ import (
)
func NewClient(t internal.Transport) *Client {
- dev.AssertNotNil(t, "t")
+ dev.AssertNotNil(t, "transport")
return &Client{t: t}
}
diff --git a/data/client.go b/data/client.go
index 3bf0417d..abb1a0f3 100644
--- a/data/client.go
+++ b/data/client.go
@@ -14,7 +14,7 @@ import (
)
func NewClient(t internal.Transport, rd api.RequestDefaults) *Client {
- dev.AssertNotNil(t, "nil transport")
+ dev.AssertNotNil(t, "transport")
return &Client{
transport: t,
diff --git a/data/client_test.go b/data/client_test.go
index 107161e6..fa56fecf 100644
--- a/data/client_test.go
+++ b/data/client_test.go
@@ -26,10 +26,10 @@ func TestClient_Insert(t *testing.T) {
for _, tt := range []struct {
name string
- object *data.Object // Object to be inserted.
- want *types.Object[map[string]any] // Expected return value.
+ object *data.Object // Object to be inserted.
stubs []testkit.Stub[api.InsertObjectRequest, api.InsertObjectResponse]
- err testkit.Error
+ want *types.Object[map[string]any] // Expected return value.
+ err testkit.Error // Expected error.
}{
{
name: "nil object",
diff --git a/internal/api/backup.go b/internal/api/backup.go
new file mode 100644
index 00000000..54b2fa35
--- /dev/null
+++ b/internal/api/backup.go
@@ -0,0 +1,254 @@
+package api
+
+import (
+ "encoding/json"
+ "net/http"
+ "net/url"
+ "time"
+
+ "github.com/weaviate/weaviate-go-client/v6/internal/api/internal/gen/rest"
+ "github.com/weaviate/weaviate-go-client/v6/internal/transports"
+)
+
+type BackupInfo struct {
+ Backend string
+ ID string
+ Bucket string
+ Path string
+ Error string
+ Status BackupStatus
+
+ StartedAt *time.Time
+ CompletedAt *time.Time
+ IncludesCollections []string
+ SizeGiB *float32
+}
+
+var _ json.Unmarshaler = (*BackupInfo)(nil)
+
+type BackupCompressionLevel string
+
+const (
+ BackupCompressionLevelDefault BackupCompressionLevel = BackupCompressionLevel(rest.DefaultCompression)
+ BackupCompressionLevelBestSpeed BackupCompressionLevel = BackupCompressionLevel(rest.BestSpeed)
+ BackupCompressionLevelBestCompression BackupCompressionLevel = BackupCompressionLevel(rest.BestCompression)
+ BackupCompressionLevelZstdDefault BackupCompressionLevel = BackupCompressionLevel(rest.ZstdDefaultCompression)
+ BackupCompressionLevelZstdBestSpeed BackupCompressionLevel = BackupCompressionLevel(rest.ZstdBestSpeed)
+ BackupCompressionLevelZstdBestCompression BackupCompressionLevel = BackupCompressionLevel(rest.ZstdBestCompression)
+ BackupCompressionLevelNone BackupCompressionLevel = BackupCompressionLevel(rest.NoCompression)
+)
+
+type BackupStatus string
+
+const (
+ BackupStatusStarted BackupStatus = BackupStatus(rest.BackupListResponseStatusSTARTED)
+ BackupStatusTransferring BackupStatus = BackupStatus(rest.BackupListResponseStatusTRANSFERRING)
+ BackupStatusTransferred BackupStatus = BackupStatus(rest.BackupListResponseStatusTRANSFERRED)
+ BackupStatusFinalizing BackupStatus = BackupStatus(rest.BackupListResponseStatusFINALIZING)
+ BackupStatusCanceling BackupStatus = BackupStatus(rest.BackupListResponseStatusCANCELLING)
+ BackupStatusCanceled BackupStatus = BackupStatus(rest.BackupListResponseStatusCANCELED)
+ BackupStatusSuccess BackupStatus = BackupStatus(rest.BackupListResponseStatusSUCCESS)
+ BackupStatusFailed BackupStatus = BackupStatus(rest.BackupListResponseStatusFAILED)
+)
+
+type RBACRestoreOption string
+
+const (
+ RBACRestoreAll RBACRestoreOption = RBACRestoreOption(rest.All)
+ RBACRestoreNone RBACRestoreOption = RBACRestoreOption(rest.NoRestore)
+)
+
+type BackupOperation int
+
+const (
+ BackupOperationCreate BackupOperation = iota
+ BackupOperationRestore
+)
+
+type CreateBackupRequest struct {
+ transports.BaseEndpoint
+
+ Backend string // Required: backend storage.
+ ID string // Required: backup ID.
+ BackupPath string
+ Endpoint string
+ Bucket string
+ IncludeCollections []string
+ ExcludeCollections []string
+ PrefixIncremental string
+ MaxCPUPercentage int
+ ChunkSizeMiB int
+ CompressionLevel BackupCompressionLevel
+}
+
+// Compile-time assertion that CreateBackupRequest implements [transports.Endpoint].
+var (
+ _ transports.Endpoint = (*CreateBackupRequest)(nil)
+ _ json.Marshaler = (*CreateBackupRequest)(nil)
+)
+
+func (*CreateBackupRequest) Method() string { return http.MethodPost }
+func (r *CreateBackupRequest) Path() string { return "/backups/" + r.Backend }
+func (r *CreateBackupRequest) Body() any { return r }
+
+type RestoreBackupRequest struct {
+ transports.BaseEndpoint
+
+ Backend string
+ ID string
+ BackupPath string
+ Endpoint string
+ Bucket string
+ IncludeCollections []string
+ ExcludeCollections []string
+ MaxCPUPercentage int
+ OverwriteAlias bool
+ RestoreUsers RBACRestoreOption
+ RestoreRoles RBACRestoreOption
+ NodeMapping map[string]string
+}
+
+// Compile-time assertion that RestoreBackupRequest implements [transports.Endpoint].
+var (
+ _ transports.Endpoint = (*RestoreBackupRequest)(nil)
+ _ json.Marshaler = (*RestoreBackupRequest)(nil)
+)
+
+func (*RestoreBackupRequest) Method() string { return http.MethodPost }
+func (r *RestoreBackupRequest) Path() string {
+ return "/backups/" + r.Backend + "/" + r.ID + "/restore"
+}
+func (r *RestoreBackupRequest) Body() any { return r }
+
+type BackupStatusRequest struct {
+ transports.BaseEndpoint
+
+ Backend string
+ ID string
+ Operation BackupOperation
+}
+
+// Compile-time assertion that BackupStatusRequest implements [transports.Endpoint].
+var _ transports.Endpoint = (*BackupStatusRequest)(nil)
+
+func (*BackupStatusRequest) Method() string { return http.MethodGet }
+func (r *BackupStatusRequest) Path() string {
+ path := "/backups/" + r.Backend + "/" + r.ID
+ if r.Operation == BackupOperationRestore {
+ path += "/restore"
+ }
+ return path
+}
+
+// ListBackupsRequest fetches all backups in a backend storage.
+type ListBackupsRequest struct {
+ transports.BaseEndpoint
+
+ Backend string
+ StartingTimeAsc bool
+}
+
+// Compile-time assertion that ListBackupsRequest implements [transports.Endpoint].
+var _ transports.Endpoint = (*ListBackupsRequest)(nil)
+
+func (*ListBackupsRequest) Method() string { return http.MethodGet }
+func (r *ListBackupsRequest) Path() string { return "/backups/" + r.Backend }
+func (r *ListBackupsRequest) Query() url.Values {
+ if !r.StartingTimeAsc {
+ return nil
+ }
+ return url.Values{"order": {"asc"}}
+}
+
+type CancelBackupRequest struct {
+ transports.BaseEndpoint
+
+ Backend string
+ ID string
+ Operation BackupOperation
+}
+
+// Compile-time assertion that CancelBackupRequest implements [transports.Endpoint].
+var _ transports.Endpoint = (*CancelBackupRequest)(nil)
+
+func (*CancelBackupRequest) Method() string { return http.MethodDelete }
+func (r *CancelBackupRequest) Path() string {
+ path := "/backups/" + r.Backend + "/" + r.ID
+ if r.Operation == BackupOperationRestore {
+ path += "/restore"
+ }
+ return path
+}
+
+// MarshalJSON implements json.Marshaler via rest.BackupCreateRequest.
+func (r *CreateBackupRequest) MarshalJSON() ([]byte, error) {
+ req := &rest.BackupCreateRequest{
+ Id: r.ID,
+ Include: r.IncludeCollections,
+ Exclude: r.ExcludeCollections,
+ IncrementalBaseBackupId: r.PrefixIncremental,
+ Config: rest.BackupConfig{
+ Path: r.BackupPath,
+ Bucket: r.Bucket,
+ Endpoint: r.Endpoint,
+ CPUPercentage: r.MaxCPUPercentage,
+ ChunkSize: r.ChunkSizeMiB,
+ CompressionLevel: rest.BackupConfigCompressionLevel(r.CompressionLevel),
+ },
+ }
+ return json.Marshal(req)
+}
+
+// MarshalJSON implements json.Marshaler via rest.BackupRestoreRequest.
+func (r *RestoreBackupRequest) MarshalJSON() ([]byte, error) {
+ req := &rest.BackupRestoreRequest{
+ Include: r.IncludeCollections,
+ Exclude: r.ExcludeCollections,
+ OverwriteAlias: r.OverwriteAlias,
+ NodeMapping: r.NodeMapping,
+ Config: rest.RestoreConfig{
+ Bucket: r.Bucket,
+ Path: r.BackupPath,
+ Endpoint: r.Endpoint,
+ CPUPercentage: r.MaxCPUPercentage,
+ RolesOptions: rest.RestoreConfigRolesOptions(r.RestoreRoles),
+ UsersOptions: rest.RestoreConfigUsersOptions(r.RestoreUsers),
+ },
+ }
+ return json.Marshal(req)
+}
+
+// UnmarshalJSON implements json.Unmarshaler.
+func (b *BackupInfo) UnmarshalJSON(data []byte) error {
+ var bak struct {
+ Backend string `json:"backend,omitempty"`
+ ID string `json:"id,omitempty"`
+ Bucket string `json:"bucket,omitempty"`
+ Path string `json:"path,omitempty"`
+ Error string `json:"error,omitempty"`
+ Status BackupStatus `json:"status,omitempty"`
+
+ IncludesCollections []string `json:"classes,omitempty"`
+ StartedAt *time.Time `json:"startedAt,omitempty"`
+ CompletedAt *time.Time `json:"completedAt,omitempty"`
+ SizeGiB *float32 `json:"size,omitempty"`
+ }
+
+ if err := json.Unmarshal(data, &bak); err != nil {
+ return err
+ }
+
+ *b = BackupInfo{
+ Backend: bak.Backend,
+ ID: bak.ID,
+ Bucket: bak.Bucket,
+ Path: bak.Path,
+ Error: bak.Error,
+ Status: bak.Status,
+ StartedAt: bak.StartedAt,
+ CompletedAt: bak.CompletedAt,
+ IncludesCollections: bak.IncludesCollections,
+ SizeGiB: bak.SizeGiB,
+ }
+ return nil
+}
diff --git a/internal/api/endpoint_test.go b/internal/api/endpoint_test.go
index 5e8c3d66..ae381bde 100644
--- a/internal/api/endpoint_test.go
+++ b/internal/api/endpoint_test.go
@@ -30,7 +30,9 @@ import (
// from internal/api/gen/rest package, as it is guaranteed to produce a valid
// JSON, giving you a more useful comparison in the tests.
func TestRESTRequests(t *testing.T) {
- for _, tt := range []struct {
+ for _, tt := range testkit.WithOnly(t, []struct {
+ testkit.Only
+
name string
req any // Request object.
@@ -415,7 +417,130 @@ func TestRESTRequests(t *testing.T) {
wantMethod: http.MethodDelete,
wantPath: "/schema/Songs",
},
- } {
+ {
+ name: "create backup request",
+ req: &api.CreateBackupRequest{
+ Backend: "filesystem",
+ ID: "bak-1",
+ BackupPath: "/path/to/backup",
+ Endpoint: "s3.amazonaws.com",
+ Bucket: "my-backups",
+ IncludeCollections: []string{"Songs"},
+ ExcludeCollections: []string{"Pizza"},
+ PrefixIncremental: "incr-bak-",
+ MaxCPUPercentage: 92,
+ ChunkSizeMiB: 20,
+ CompressionLevel: api.BackupCompressionLevelDefault,
+ },
+ wantMethod: http.MethodPost,
+ wantPath: "/backups/filesystem",
+ wantBody: &rest.BackupCreateRequest{
+ Id: "bak-1",
+ Include: []string{"Songs"},
+ Exclude: []string{"Pizza"},
+ IncrementalBaseBackupId: "incr-bak-",
+ Config: rest.BackupConfig{
+ Path: "/path/to/backup",
+ Bucket: "my-backups",
+ Endpoint: "s3.amazonaws.com",
+ CPUPercentage: 92,
+ ChunkSize: 20,
+ CompressionLevel: rest.DefaultCompression,
+ },
+ },
+ },
+ {
+ name: "restore backup request",
+ req: &api.RestoreBackupRequest{
+ Backend: "filesystem",
+ ID: "bak-1",
+ BackupPath: "/path/to/backup",
+ Endpoint: "s3.amazonaws.com",
+ Bucket: "my-backups",
+ IncludeCollections: []string{"Songs"},
+ ExcludeCollections: []string{"Pizza"},
+ MaxCPUPercentage: 92,
+ OverwriteAlias: true,
+ RestoreUsers: api.RBACRestoreAll,
+ RestoreRoles: api.RBACRestoreNone,
+ NodeMapping: map[string]string{"node-1": "node-a"},
+ },
+ wantMethod: http.MethodPost,
+ wantPath: "/backups/filesystem/bak-1/restore",
+ wantBody: &rest.BackupRestoreRequest{
+ Include: []string{"Songs"},
+ Exclude: []string{"Pizza"},
+ OverwriteAlias: true,
+ NodeMapping: map[string]string{"node-1": "node-a"},
+ Config: rest.RestoreConfig{
+ Path: "/path/to/backup",
+ Bucket: "my-backups",
+ Endpoint: "s3.amazonaws.com",
+ CPUPercentage: 92,
+ UsersOptions: rest.All,
+ RolesOptions: rest.RestoreConfigRolesOptionsNoRestore,
+ },
+ },
+ },
+ {
+ name: "get backup create status",
+ req: &api.BackupStatusRequest{
+ Backend: "filesystem",
+ ID: "bak-1",
+ Operation: api.BackupOperationCreate,
+ },
+ wantMethod: http.MethodGet,
+ wantPath: "/backups/filesystem/bak-1",
+ },
+ {
+ name: "get backup restore status",
+ req: &api.BackupStatusRequest{
+ Backend: "filesystem",
+ ID: "bak-1",
+ Operation: api.BackupOperationRestore,
+ },
+ wantMethod: http.MethodGet,
+ wantPath: "/backups/filesystem/bak-1/restore",
+ },
+ {
+ name: "list backups",
+ req: &api.ListBackupsRequest{
+ Backend: "filesystem",
+ },
+ wantMethod: http.MethodGet,
+ wantPath: "/backups/filesystem",
+ },
+ {
+ name: "list backups order by starting time",
+ req: &api.ListBackupsRequest{
+ Backend: "filesystem",
+ StartingTimeAsc: true,
+ },
+ wantMethod: http.MethodGet,
+ wantPath: "/backups/filesystem",
+ wantQuery: url.Values{"order": {"asc"}},
+ },
+ {
+ name: "cancel backup create",
+ req: &api.CancelBackupRequest{
+ Backend: "filesystem",
+ ID: "bak-1",
+ Operation: api.BackupOperationCreate,
+ },
+ wantMethod: http.MethodDelete,
+ wantPath: "/backups/filesystem/bak-1",
+ },
+ {
+ name: "cancel backup restore",
+ req: &api.CancelBackupRequest{
+ Backend: "filesystem",
+ ID: "bak-1",
+ Operation: api.BackupOperationRestore,
+ },
+ wantMethod: http.MethodDelete,
+ wantPath: "/backups/filesystem/bak-1/restore",
+ },
+ }) {
t.Run(tt.name, func(t *testing.T) {
require.Implements(t, (*transports.Endpoint)(nil), tt.req)
endpoint := (tt.req).(transports.Endpoint)
@@ -711,6 +836,104 @@ func TestRESTResponses(t *testing.T) {
},
},
},
+ {
+ name: "backup create response",
+ body: &rest.BackupCreateResponse{
+ Backend: "filesystem",
+ Id: "bak-1",
+ Bucket: "my-backups",
+ Path: "/path/to/backup",
+ Classes: []string{"Songs"},
+ Error: "whaam!",
+ Status: rest.BackupCreateResponseStatusFAILED,
+ },
+ dest: new(api.BackupInfo),
+ want: &api.BackupInfo{
+ Backend: "filesystem",
+ ID: "bak-1",
+ Bucket: "my-backups",
+ Path: "/path/to/backup",
+ IncludesCollections: []string{"Songs"},
+ Error: "whaam!",
+ Status: api.BackupStatusFailed,
+ },
+ },
+ {
+ name: "backup restore response",
+ body: &rest.BackupRestoreResponse{
+ Backend: "filesystem",
+ Id: "bak-1",
+ Path: "/path/to/backup",
+ Classes: []string{"Songs"},
+ Error: "whaam!",
+ Status: rest.BackupRestoreResponseStatusFAILED,
+ },
+ dest: new(api.BackupInfo),
+ want: &api.BackupInfo{
+ Backend: "filesystem",
+ ID: "bak-1",
+ Path: "/path/to/backup",
+ IncludesCollections: []string{"Songs"},
+ Error: "whaam!",
+ Status: api.BackupStatusFailed,
+ },
+ },
+ {
+ name: "backup create status response",
+ body: &rest.BackupCreateStatusResponse{
+ Backend: "filesystem",
+ Id: "bak-1",
+ Path: "/path/to/backup",
+ Status: rest.BackupCreateStatusResponseStatusSUCCESS,
+ Size: 92,
+ },
+ dest: new(api.BackupInfo),
+ want: &api.BackupInfo{
+ Backend: "filesystem",
+ ID: "bak-1",
+ Path: "/path/to/backup",
+ Status: api.BackupStatusSuccess,
+ SizeGiB: testkit.Ptr[float32](92),
+ StartedAt: testkit.Ptr(time.Time{}),
+ CompletedAt: testkit.Ptr(time.Time{}),
+ },
+ },
+ {
+ name: "backup list response",
+ body: rest.BackupListResponse{
+ {
+ Id: "bak-1",
+ Classes: []string{"Artists"},
+ Status: rest.BackupListResponseStatusTRANSFERRING,
+ Size: 92,
+ },
+ {
+ Id: "bak-2",
+ Classes: []string{"Songs"},
+ Status: rest.BackupListResponseStatusTRANSFERRED,
+ Size: 80085,
+ },
+ },
+ dest: new([]api.BackupInfo),
+ want: &[]api.BackupInfo{
+ {
+ ID: "bak-1",
+ IncludesCollections: []string{"Artists"},
+ Status: api.BackupStatusTransferring,
+ SizeGiB: testkit.Ptr[float32](92),
+ StartedAt: testkit.Ptr(time.Time{}),
+ CompletedAt: testkit.Ptr(time.Time{}),
+ },
+ {
+ ID: "bak-2",
+ IncludesCollections: []string{"Songs"},
+ Status: api.BackupStatusTransferred,
+ SizeGiB: testkit.Ptr[float32](80085),
+ StartedAt: testkit.Ptr(time.Time{}),
+ CompletedAt: testkit.Ptr(time.Time{}),
+ },
+ },
+ },
} {
t.Run(tt.name, func(t *testing.T) {
require.NotNil(t, tt.body, "incomplete test case: body is nil")
diff --git a/internal/api/internal/gen/proto/v1/base_search.pb.go b/internal/api/internal/gen/proto/v1/base_search.pb.go
index 00bbff2c..ba533926 100644
--- a/internal/api/internal/gen/proto/v1/base_search.pb.go
+++ b/internal/api/internal/gen/proto/v1/base_search.pb.go
@@ -410,8 +410,9 @@ type Hybrid struct {
// protolint:disable:next REPEATED_FIELD_NAMES_PLURALIZED
//
// Deprecated: Marked as deprecated in v1/base_search.proto.
- Vector []float32 `protobuf:"fixed32,3,rep,packed,name=vector,proto3" json:"vector,omitempty"` // will be removed in the future, use vectors
- Alpha float32 `protobuf:"fixed32,4,opt,name=alpha,proto3" json:"alpha,omitempty"`
+ Vector []float32 `protobuf:"fixed32,3,rep,packed,name=vector,proto3" json:"vector,omitempty"` // will be removed in the future, use vectors
+ // Deprecated: Marked as deprecated in v1/base_search.proto.
+ Alpha float32 `protobuf:"fixed32,4,opt,name=alpha,proto3" json:"alpha,omitempty"` // deprecated in 1.36.0 - use alpha_param
FusionType Hybrid_FusionType `protobuf:"varint,5,opt,name=fusion_type,json=fusionType,proto3,enum=weaviate.v1.Hybrid_FusionType" json:"fusion_type,omitempty"`
// Deprecated: Marked as deprecated in v1/base_search.proto.
VectorBytes []byte `protobuf:"bytes,6,opt,name=vector_bytes,json=vectorBytes,proto3" json:"vector_bytes,omitempty"` // deprecated in 1.29.0 - use vectors
@@ -421,6 +422,7 @@ type Hybrid struct {
NearVector *NearVector `protobuf:"bytes,9,opt,name=near_vector,json=nearVector,proto3" json:"near_vector,omitempty"` // same as above. Use the target vector in the hybrid message
Targets *Targets `protobuf:"bytes,10,opt,name=targets,proto3" json:"targets,omitempty"`
Bm25SearchOperator *SearchOperatorOptions `protobuf:"bytes,11,opt,name=bm25_search_operator,json=bm25SearchOperator,proto3,oneof" json:"bm25_search_operator,omitempty"`
+ AlphaParam *float32 `protobuf:"fixed32,12,opt,name=alpha_param,json=alphaParam,proto3,oneof" json:"alpha_param,omitempty"`
// only vector distance, but keep it extendable
//
// Types that are valid to be assigned to Threshold:
@@ -484,6 +486,7 @@ func (x *Hybrid) GetVector() []float32 {
return nil
}
+// Deprecated: Marked as deprecated in v1/base_search.proto.
func (x *Hybrid) GetAlpha() float32 {
if x != nil {
return x.Alpha
@@ -542,6 +545,13 @@ func (x *Hybrid) GetBm25SearchOperator() *SearchOperatorOptions {
return nil
}
+func (x *Hybrid) GetAlphaParam() float32 {
+ if x != nil && x.AlphaParam != nil {
+ return *x.AlphaParam
+ }
+ return 0
+}
+
func (x *Hybrid) GetThreshold() isHybrid_Threshold {
if x != nil {
return x.Threshold
@@ -1477,14 +1487,14 @@ const file_v1_base_search_proto_rawDesc = "" +
"\x14OPERATOR_UNSPECIFIED\x10\x00\x12\x0f\n" +
"\vOPERATOR_OR\x10\x01\x12\x10\n" +
"\fOPERATOR_AND\x10\x02B\x1a\n" +
- "\x18_minimum_or_tokens_match\"\xe6\x05\n" +
+ "\x18_minimum_or_tokens_match\"\xa0\x06\n" +
"\x06Hybrid\x12\x14\n" +
"\x05query\x18\x01 \x01(\tR\x05query\x12\x1e\n" +
"\n" +
"properties\x18\x02 \x03(\tR\n" +
"properties\x12\x1a\n" +
- "\x06vector\x18\x03 \x03(\x02B\x02\x18\x01R\x06vector\x12\x14\n" +
- "\x05alpha\x18\x04 \x01(\x02R\x05alpha\x12?\n" +
+ "\x06vector\x18\x03 \x03(\x02B\x02\x18\x01R\x06vector\x12\x18\n" +
+ "\x05alpha\x18\x04 \x01(\x02B\x02\x18\x01R\x05alpha\x12?\n" +
"\vfusion_type\x18\x05 \x01(\x0e2\x1e.weaviate.v1.Hybrid.FusionTypeR\n" +
"fusionType\x12%\n" +
"\fvector_bytes\x18\x06 \x01(\fB\x02\x18\x01R\vvectorBytes\x12)\n" +
@@ -1494,7 +1504,9 @@ const file_v1_base_search_proto_rawDesc = "" +
"nearVector\x12.\n" +
"\atargets\x18\n" +
" \x01(\v2\x14.weaviate.v1.TargetsR\atargets\x12Y\n" +
- "\x14bm25_search_operator\x18\v \x01(\v2\".weaviate.v1.SearchOperatorOptionsH\x01R\x12bm25SearchOperator\x88\x01\x01\x12)\n" +
+ "\x14bm25_search_operator\x18\v \x01(\v2\".weaviate.v1.SearchOperatorOptionsH\x01R\x12bm25SearchOperator\x88\x01\x01\x12$\n" +
+ "\valpha_param\x18\f \x01(\x02H\x02R\n" +
+ "alphaParam\x88\x01\x01\x12)\n" +
"\x0fvector_distance\x18\x14 \x01(\x02H\x00R\x0evectorDistance\x12.\n" +
"\avectors\x18\x15 \x03(\v2\x14.weaviate.v1.VectorsR\avectors\"a\n" +
"\n" +
@@ -1503,7 +1515,8 @@ const file_v1_base_search_proto_rawDesc = "" +
"\x12FUSION_TYPE_RANKED\x10\x01\x12\x1e\n" +
"\x1aFUSION_TYPE_RELATIVE_SCORE\x10\x02B\v\n" +
"\tthresholdB\x17\n" +
- "\x15_bm25_search_operator\"\xa7\x04\n" +
+ "\x15_bm25_search_operatorB\x0e\n" +
+ "\f_alpha_param\"\xa7\x04\n" +
"\n" +
"NearVector\x12\x1a\n" +
"\x06vector\x18\x01 \x03(\x02B\x02\x18\x01R\x06vector\x12!\n" +
diff --git a/internal/api/internal/gen/rest/models.go b/internal/api/internal/gen/rest/models.go
index 2597ea41..57ca3ce4 100644
--- a/internal/api/internal/gen/rest/models.go
+++ b/internal/api/internal/gen/rest/models.go
@@ -27,7 +27,9 @@ const (
// Defines values for BackupCreateResponseStatus.
const (
BackupCreateResponseStatusCANCELED BackupCreateResponseStatus = "CANCELED"
+ BackupCreateResponseStatusCANCELLING BackupCreateResponseStatus = "CANCELLING"
BackupCreateResponseStatusFAILED BackupCreateResponseStatus = "FAILED"
+ BackupCreateResponseStatusFINALIZING BackupCreateResponseStatus = "FINALIZING"
BackupCreateResponseStatusSTARTED BackupCreateResponseStatus = "STARTED"
BackupCreateResponseStatusSUCCESS BackupCreateResponseStatus = "SUCCESS"
BackupCreateResponseStatusTRANSFERRED BackupCreateResponseStatus = "TRANSFERRED"
@@ -37,7 +39,9 @@ const (
// Defines values for BackupCreateStatusResponseStatus.
const (
BackupCreateStatusResponseStatusCANCELED BackupCreateStatusResponseStatus = "CANCELED"
+ BackupCreateStatusResponseStatusCANCELLING BackupCreateStatusResponseStatus = "CANCELLING"
BackupCreateStatusResponseStatusFAILED BackupCreateStatusResponseStatus = "FAILED"
+ BackupCreateStatusResponseStatusFINALIZING BackupCreateStatusResponseStatus = "FINALIZING"
BackupCreateStatusResponseStatusSTARTED BackupCreateStatusResponseStatus = "STARTED"
BackupCreateStatusResponseStatusSUCCESS BackupCreateStatusResponseStatus = "SUCCESS"
BackupCreateStatusResponseStatusTRANSFERRED BackupCreateStatusResponseStatus = "TRANSFERRED"
@@ -47,7 +51,9 @@ const (
// Defines values for BackupListResponseStatus.
const (
BackupListResponseStatusCANCELED BackupListResponseStatus = "CANCELED"
+ BackupListResponseStatusCANCELLING BackupListResponseStatus = "CANCELLING"
BackupListResponseStatusFAILED BackupListResponseStatus = "FAILED"
+ BackupListResponseStatusFINALIZING BackupListResponseStatus = "FINALIZING"
BackupListResponseStatusSTARTED BackupListResponseStatus = "STARTED"
BackupListResponseStatusSUCCESS BackupListResponseStatus = "SUCCESS"
BackupListResponseStatusTRANSFERRED BackupListResponseStatus = "TRANSFERRED"
@@ -57,7 +63,9 @@ const (
// Defines values for BackupRestoreResponseStatus.
const (
BackupRestoreResponseStatusCANCELED BackupRestoreResponseStatus = "CANCELED"
+ BackupRestoreResponseStatusCANCELLING BackupRestoreResponseStatus = "CANCELLING"
BackupRestoreResponseStatusFAILED BackupRestoreResponseStatus = "FAILED"
+ BackupRestoreResponseStatusFINALIZING BackupRestoreResponseStatus = "FINALIZING"
BackupRestoreResponseStatusSTARTED BackupRestoreResponseStatus = "STARTED"
BackupRestoreResponseStatusSUCCESS BackupRestoreResponseStatus = "SUCCESS"
BackupRestoreResponseStatusTRANSFERRED BackupRestoreResponseStatus = "TRANSFERRED"
@@ -67,7 +75,9 @@ const (
// Defines values for BackupRestoreStatusResponseStatus.
const (
BackupRestoreStatusResponseStatusCANCELED BackupRestoreStatusResponseStatus = "CANCELED"
+ BackupRestoreStatusResponseStatusCANCELLING BackupRestoreStatusResponseStatus = "CANCELLING"
BackupRestoreStatusResponseStatusFAILED BackupRestoreStatusResponseStatus = "FAILED"
+ BackupRestoreStatusResponseStatusFINALIZING BackupRestoreStatusResponseStatus = "FINALIZING"
BackupRestoreStatusResponseStatusSTARTED BackupRestoreStatusResponseStatus = "STARTED"
BackupRestoreStatusResponseStatusSUCCESS BackupRestoreStatusResponseStatus = "SUCCESS"
BackupRestoreStatusResponseStatusTRANSFERRED BackupRestoreStatusResponseStatus = "TRANSFERRED"
@@ -100,6 +110,19 @@ const (
DBUserInfoDbUserTypeDbUser DBUserInfoDbUserType = "db_user"
)
+// Defines values for ExportCreateResponseStatus.
+const (
+ ExportCreateResponseStatusSTARTED ExportCreateResponseStatus = "STARTED"
+)
+
+// Defines values for ExportStatusResponseStatus.
+const (
+ ExportStatusResponseStatusFAILED ExportStatusResponseStatus = "FAILED"
+ ExportStatusResponseStatusSTARTED ExportStatusResponseStatus = "STARTED"
+ ExportStatusResponseStatusSUCCESS ExportStatusResponseStatus = "SUCCESS"
+ ExportStatusResponseStatusTRANSFERRING ExportStatusResponseStatus = "TRANSFERRING"
+)
+
// Defines values for GroupType.
const (
GroupTypeOidc GroupType = "oidc"
@@ -136,8 +159,8 @@ const (
// Defines values for ObjectsGetResponseResultStatus.
const (
- ObjectsGetResponseResultStatusFAILED ObjectsGetResponseResultStatus = "FAILED"
- ObjectsGetResponseResultStatusSUCCESS ObjectsGetResponseResultStatus = "SUCCESS"
+ FAILED ObjectsGetResponseResultStatus = "FAILED"
+ SUCCESS ObjectsGetResponseResultStatus = "SUCCESS"
)
// Defines values for PermissionAction.
@@ -244,6 +267,14 @@ const (
NoRestore RestoreConfigUsersOptions = "noRestore"
)
+// Defines values for ShardProgressStatus.
+const (
+ ShardProgressStatusFAILED ShardProgressStatus = "FAILED"
+ ShardProgressStatusSTARTED ShardProgressStatus = "STARTED"
+ ShardProgressStatusSUCCESS ShardProgressStatus = "SUCCESS"
+ ShardProgressStatusTRANSFERRING ShardProgressStatus = "TRANSFERRING"
+)
+
// Defines values for StatisticsStatus.
const (
StatisticsStatusHEALTHY StatisticsStatus = "HEALTHY"
@@ -412,6 +443,9 @@ type BackupCreateRequest struct {
// Include List of collections to include in the backup creation process. If not set, all collections are included. Cannot be used together with `exclude`.
Include []string `json:"include,omitempty"`
+
+ // IncrementalBaseBackupId The ID of an existing backup to use as the base for a file-based incremental backup. If set, only files that have changed since the base backup will be included in the new backup.
+ IncrementalBaseBackupId string `json:"incremental_base_backup_id"`
}
// BackupCreateResponse The definition of a backup create response body
@@ -872,6 +906,84 @@ type ErrorResponse struct {
} `json:"error,omitempty"`
}
+// ExportCreateRequest Request to create a new export operation
+type ExportCreateRequest struct {
+ // Config Backend-specific configuration
+ Config struct {
+ // Bucket Bucket, container, or volume name for cloud storage backends
+ Bucket string `json:"bucket,omitempty"`
+
+ // Path Path prefix within the bucket or filesystem
+ Path string `json:"path,omitempty"`
+ } `json:"config,omitempty"`
+
+ // Exclude List of collection names to exclude from the export. Cannot be used with 'include'.
+ Exclude []string `json:"exclude,omitempty"`
+
+ // Id Unique identifier for this export. Must be URL-safe.
+ Id string `json:"id"`
+
+ // Include List of collection names to include in the export. Cannot be used with 'exclude'.
+ Include []string `json:"include,omitempty"`
+}
+
+// ExportCreateResponse Response from creating an export operation
+type ExportCreateResponse struct {
+ // Backend The backend storage system used
+ Backend string `json:"backend,omitempty"`
+
+ // Classes List of collections being exported
+ Classes []string `json:"classes,omitempty"`
+
+ // Id Unique identifier for this export
+ Id string `json:"id,omitempty"`
+
+ // Path Full path where the export is being written
+ Path string `json:"path,omitempty"`
+
+ // StartedAt When the export started
+ StartedAt time.Time `json:"startedAt,omitempty"`
+
+ // Status Current status of the export
+ Status ExportCreateResponseStatus `json:"status,omitempty"`
+}
+
+// ExportCreateResponseStatus Current status of the export
+type ExportCreateResponseStatus string
+
+// ExportStatusResponse Current status of an export operation
+type ExportStatusResponse struct {
+ // Backend The backend storage system used
+ Backend string `json:"backend,omitempty"`
+
+ // Classes List of collections in this export
+ Classes []string `json:"classes,omitempty"`
+
+ // Error Error message if export failed
+ Error string `json:"error,omitempty"`
+
+ // Id Unique identifier for this export
+ Id string `json:"id,omitempty"`
+
+ // Path Full path where the export is stored
+ Path string `json:"path,omitempty"`
+
+ // ShardStatus Per-shard progress: className -> shardName -> status
+ ShardStatus map[string]map[string]ShardProgress `json:"shardStatus,omitempty"`
+
+ // StartedAt When the export started
+ StartedAt time.Time `json:"startedAt,omitempty"`
+
+ // Status Current status of the export
+ Status ExportStatusResponseStatus `json:"status,omitempty"`
+
+ // TookInMs Duration of the export in milliseconds
+ TookInMs int64 `json:"tookInMs,omitempty"`
+}
+
+// ExportStatusResponseStatus Current status of the export
+type ExportStatusResponseStatus string
+
// GeoCoordinates defines model for GeoCoordinates.
type GeoCoordinates struct {
// Latitude The latitude of the point on earth in decimal form.
@@ -1671,6 +1783,21 @@ type Schema struct {
Name string `json:"name,omitempty"`
}
+// ShardProgress Progress information for exporting a single shard
+type ShardProgress struct {
+ // Error Error message if this shard's export failed
+ Error string `json:"error,omitempty"`
+
+ // ObjectsExported Number of objects exported from this shard
+ ObjectsExported int64 `json:"objectsExported,omitempty"`
+
+ // Status Status of this shard's export
+ Status ShardProgressStatus `json:"status,omitempty"`
+}
+
+// ShardProgressStatus Status of this shard's export
+type ShardProgressStatus string
+
// ShardStatus The status of a single shard
type ShardStatus struct {
// Status Status of the shard
@@ -2041,6 +2168,15 @@ type BatchReferencesCreateParams struct {
ConsistencyLevel string `form:"consistency_level,omitempty" json:"consistency_level,omitempty"`
}
+// ExportStatusParams defines parameters for ExportStatus.
+type ExportStatusParams struct {
+ // Bucket Optional bucket name where the export is stored. If not specified, uses the backend's default bucket.
+ Bucket string `form:"bucket,omitempty" json:"bucket,omitempty"`
+
+ // Path Optional path prefix within the bucket. If not specified, uses the backend's default path.
+ Path string `form:"path,omitempty" json:"path,omitempty"`
+}
+
// NodesGetParams defines parameters for NodesGet.
type NodesGetParams struct {
// Output Controls the verbosity of the output, possible values are: `minimal`, `verbose`. Defaults to `minimal`.
@@ -2367,6 +2503,9 @@ type BatchReferencesCreateJSONRequestBody = BatchReferencesCreateJSONBody
// ClassificationsPostJSONRequestBody defines body for ClassificationsPost for application/json ContentType.
type ClassificationsPostJSONRequestBody = Classification
+// ExportCreateJSONRequestBody defines body for ExportCreate for application/json ContentType.
+type ExportCreateJSONRequestBody = ExportCreateRequest
+
// GraphqlPostJSONRequestBody defines body for GraphqlPost for application/json ContentType.
type GraphqlPostJSONRequestBody = GraphQLQuery
diff --git a/internal/testkit/context.go b/internal/testkit/context.go
new file mode 100644
index 00000000..98d12bad
--- /dev/null
+++ b/internal/testkit/context.go
@@ -0,0 +1,68 @@
+package testkit
+
+import (
+ "context"
+ "time"
+)
+
+// NewTickingContext returns a context which expires after N calls to Done.
+func NewTickingContext(ticks int) *tickingContext {
+ return &tickingContext{
+ ticks: ticks - 1, // ticks are "0-indexed" for convenience
+ done: nil,
+ }
+}
+
+// tickingContext models a context which expires after a number of "ticks".
+// A tick happens every time its Done method gets called. On the Nth call
+// to Done, the returned channel is closed. The next call to [Context.Err]
+// will return [context.DeadlineExceeded].
+//
+// Ticking context is useful for forcing scenarios which would otherwise be
+// depending on real time. Consider the following loop:
+//
+// for {
+// select {
+// case <-ctx.Done():
+// return nil
+// default:
+// select {
+// case <-time.After(5*time.Millisecond):
+// case <-ctx.Done():
+// return errors.New("done while sleeping")
+// }
+// }
+// }
+//
+// YMMV when trying to get the context to expire in the second select statement
+// using a time-based deadline. With tickingContext we can force the execution
+// to reach it by saying it should expire "after 2 ticks".
+// Similarly, we can unambiguously model a scenario where the context expires
+// exactly on the 6th iteration without introducing additional wait.
+type tickingContext struct {
+ ticks int // remaining ticks, "0-indexed" (ticks==1 means there are 2 ticks remaining)
+ done chan struct{} // done channel is closed when ticks expire.
+ err error // not safe for concurrent access.
+}
+
+var _ context.Context = (*tickingContext)(nil)
+
+// Deadline always returns ok==true, so that from the outside it looks like this
+// context has a deadline. It does, in fact, but it's tick-based and not time-based.
+func (ctx *tickingContext) Deadline() (deadline time.Time, ok bool) { return time.Time{}, true }
+func (ctx *tickingContext) Value(key any) any { return nil }
+
+func (ctx *tickingContext) Done() <-chan struct{} {
+ if ctx.ticks == 0 {
+ ctx.err = context.DeadlineExceeded
+ if ctx.done == nil {
+ ctx.done = make(chan struct{})
+ close(ctx.done)
+ }
+ } else {
+ ctx.ticks--
+ }
+ return ctx.done
+}
+
+func (ctx *tickingContext) Err() error { return ctx.err }
diff --git a/internal/testkit/error.go b/internal/testkit/error.go
index 4ff848b6..5ffbd16a 100644
--- a/internal/testkit/error.go
+++ b/internal/testkit/error.go
@@ -70,3 +70,10 @@ func (e Error) Require(t *testing.T, err error, msgAndArgs ...any) {
t.FailNow()
}
}
+
+// ErrorIs returns an assertion that the error matches the concrete non-nil
+// target error want, as determined by errors.Is.
+func ErrorIs(want error) Error {
+ return func(tt assert.TestingT, got error, msgAndArgs ...any) bool {
+ return assert.ErrorIs(tt, got, want, msgAndArgs...)
+ }
+}
diff --git a/internal/testkit/testkit_test.go b/internal/testkit/testkit_test.go
index 908c9131..d3c06970 100644
--- a/internal/testkit/testkit_test.go
+++ b/internal/testkit/testkit_test.go
@@ -4,6 +4,7 @@ import (
"context"
"os"
"testing"
+ "time"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
@@ -170,4 +171,47 @@ func TestError(t *testing.T) {
})
})
})
+
+ t.Run("ErrorIs", func(t *testing.T) {
+ t.Run("assert", func(t *testing.T) {
+ var ok bool
+ require.NotPanics(t, func() {
+ ok = testkit.ErrorIs(testkit.ErrWhaam).Assert(t, testkit.ErrWhaam)
+ })
+ require.True(t, ok, "return value")
+ })
+ t.Run("require", func(t *testing.T) {
+ require.NotPanics(t, func() {
+ testkit.ErrorIs(testkit.ErrWhaam).Require(t, testkit.ErrWhaam)
+ })
+ })
+ })
+}
+
+func TestTickingContext(t *testing.T) {
+ ctx := testkit.NewTickingContext(2)
+ require.Implements(t, (*context.Context)(nil), ctx)
+
+ _, ok := ctx.Deadline()
+ assert.True(t, ok, "must report that deadline is set")
+ assert.NoError(t, ctx.Err(), "context is initially valid")
+
+ select {
+ case <-ctx.Done():
+ require.FailNow(t, "context expired after 1 tick, want 2")
+ default:
+ }
+ require.NoError(t, ctx.Err(), "context error after 1 tick")
+
+ select {
+ case <-ctx.Done():
+ assert.ErrorIs(t, ctx.Err(), context.DeadlineExceeded)
+ assert.NotPanics(t, func() { ctx.Done() }, "close of closed channel")
+ case <-time.After(5 * time.Millisecond):
+ // When multiple channels can be read from, select will fire a case
+ // at random. To avoid flakiness, we block the second channel for a
+ // short while, such that it won't stall the test suite even if our
+ // tick logic fails.
+ require.FailNow(t, "context not done after 2 ticks")
+ }
}
diff --git a/internal/testkit/util.go b/internal/testkit/util.go
index d36884f4..f6f8aaca 100644
--- a/internal/testkit/util.go
+++ b/internal/testkit/util.go
@@ -30,5 +30,10 @@ func Ptr[T any](v T) *T { return &v }
// in unit tests to ensure the test cases are valid.
func RequirePointer(t *testing.T, v any, name string) {
t.Helper()
- require.Equalf(t, reflect.Pointer, reflect.TypeOf(v).Kind(), "%q must be a pointer", name)
+ switch reflect.TypeOf(v).Kind() {
+ case reflect.Map, reflect.Slice, reflect.Chan, reflect.Pointer:
+ return
+ default:
+ require.FailNow(t, "%q must be a pointer", name)
+ }
}
diff --git a/query/client.go b/query/client.go
index 88379dfd..11fb876d 100644
--- a/query/client.go
+++ b/query/client.go
@@ -10,7 +10,7 @@ import (
)
func NewClient(t internal.Transport, rd api.RequestDefaults) *Client {
- dev.AssertNotNil(t, "t")
+ dev.AssertNotNil(t, "transport")
return &Client{
transport: t,