diff --git a/.github/scripts/update_generation_config.sh b/.github/scripts/update_generation_config.sh index a0b95de6c0d..3b890a76b24 100644 --- a/.github/scripts/update_generation_config.sh +++ b/.github/scripts/update_generation_config.sh @@ -1,5 +1,5 @@ #!/bin/bash -set -ex +set -e # This script should be run at the root of the repository. # This script is used to update googleapis_commitish, gapic_generator_version, # and libraries_bom_version in generation configuration at the time of running @@ -174,4 +174,4 @@ if [ -z "${pr_num}" ]; then else git push gh pr edit "${pr_num}" --title "${title}" --body "${title}" -fi \ No newline at end of file +fi diff --git a/.github/workflows/hermetic_library_generation.yaml b/.github/workflows/hermetic_library_generation.yaml index f611b78e296..5912d6f0691 100644 --- a/.github/workflows/hermetic_library_generation.yaml +++ b/.github/workflows/hermetic_library_generation.yaml @@ -37,7 +37,7 @@ jobs: with: fetch-depth: 0 token: ${{ secrets.CLOUD_JAVA_BOT_TOKEN }} - - uses: googleapis/sdk-platform-java/.github/scripts@v2.60.1 + - uses: googleapis/sdk-platform-java/.github/scripts@v2.60.2 if: env.SHOULD_RUN == 'true' with: base_ref: ${{ github.base_ref }} diff --git a/.github/workflows/update_generation_config.yaml b/.github/workflows/update_generation_config.yaml index 7b14069ced8..a7e14bb483c 100644 --- a/.github/workflows/update_generation_config.yaml +++ b/.github/workflows/update_generation_config.yaml @@ -18,6 +18,7 @@ on: schedule: - cron: '0 2 * * *' workflow_dispatch: + jobs: update-generation-config: runs-on: ubuntu-24.04 @@ -42,4 +43,5 @@ jobs: --base_branch "${base_branch}" \ --repo ${{ github.repository }} env: - GH_TOKEN: ${{ secrets.CLOUD_JAVA_BOT_TOKEN }} \ No newline at end of file + GH_TOKEN: ${{ secrets.CLOUD_JAVA_BOT_TOKEN }} + diff --git a/.kokoro/presubmit/graalvm-native-a.cfg b/.kokoro/presubmit/graalvm-native-a.cfg index 5cc939cf909..96b4fed86fb 100644 --- a/.kokoro/presubmit/graalvm-native-a.cfg +++ 
b/.kokoro/presubmit/graalvm-native-a.cfg @@ -3,7 +3,7 @@ # Configure the docker image for kokoro-trampoline. env_vars: { key: "TRAMPOLINE_IMAGE" - value: "gcr.io/cloud-devrel-public-resources/graalvm_sdk_platform_a:3.49.0" # {x-version-update:google-cloud-shared-dependencies:current} + value: "gcr.io/cloud-devrel-public-resources/graalvm_sdk_platform_a:3.50.2" # {x-version-update:google-cloud-shared-dependencies:current} } env_vars: { diff --git a/.kokoro/presubmit/graalvm-native-b.cfg b/.kokoro/presubmit/graalvm-native-b.cfg index fd00159184a..76f04833149 100644 --- a/.kokoro/presubmit/graalvm-native-b.cfg +++ b/.kokoro/presubmit/graalvm-native-b.cfg @@ -3,7 +3,7 @@ # Configure the docker image for kokoro-trampoline. env_vars: { key: "TRAMPOLINE_IMAGE" - value: "gcr.io/cloud-devrel-public-resources/graalvm_sdk_platform_b:3.49.0" # {x-version-update:google-cloud-shared-dependencies:current} + value: "gcr.io/cloud-devrel-public-resources/graalvm_sdk_platform_b:3.50.2" # {x-version-update:google-cloud-shared-dependencies:current} } env_vars: { diff --git a/.kokoro/presubmit/graalvm-native-c.cfg b/.kokoro/presubmit/graalvm-native-c.cfg index 424494b02e0..277aa2338dd 100644 --- a/.kokoro/presubmit/graalvm-native-c.cfg +++ b/.kokoro/presubmit/graalvm-native-c.cfg @@ -3,7 +3,7 @@ # Configure the docker image for kokoro-trampoline. 
env_vars: { key: "TRAMPOLINE_IMAGE" - value: "gcr.io/cloud-devrel-public-resources/graalvm_sdk_platform_c:3.49.0" # {x-version-update:google-cloud-shared-dependencies:current} + value: "gcr.io/cloud-devrel-public-resources/graalvm_sdk_platform_c:3.50.2" # {x-version-update:google-cloud-shared-dependencies:current} } env_vars: { diff --git a/README.md b/README.md index a7681605580..9a4a024a276 100644 --- a/README.md +++ b/README.md @@ -49,7 +49,7 @@ If you are using Maven without the BOM, add this to your dependencies: If you are using Gradle 5.x or later, add this to your dependencies: ```Groovy -implementation platform('com.google.cloud:libraries-bom:26.61.0') +implementation platform('com.google.cloud:libraries-bom:26.64.0') implementation 'com.google.cloud:google-cloud-spanner' ``` diff --git a/generation_config.yaml b/generation_config.yaml index 43189d4f8de..a43bc5d4f7b 100644 --- a/generation_config.yaml +++ b/generation_config.yaml @@ -1,6 +1,6 @@ -gapic_generator_version: 2.59.0 -googleapis_commitish: e5b0e779491e106ddac0b72b79222673a846fdc9 -libraries_bom_version: 26.61.0 +gapic_generator_version: 2.60.2 +googleapis_commitish: 6588689155df35bb46974c477505990f0034f569 +libraries_bom_version: 26.64.0 libraries: - api_shortname: spanner name_pretty: Cloud Spanner diff --git a/google-cloud-spanner-executor/src/main/resources/META-INF/native-image/com.google.cloud.spanner.executor.v1/reflect-config.json b/google-cloud-spanner-executor/src/main/resources/META-INF/native-image/com.google.cloud.spanner.executor.v1/reflect-config.json index 357035df837..7c9d39e9d11 100644 --- a/google-cloud-spanner-executor/src/main/resources/META-INF/native-image/com.google.cloud.spanner.executor.v1/reflect-config.json +++ b/google-cloud-spanner-executor/src/main/resources/META-INF/native-image/com.google.cloud.spanner.executor.v1/reflect-config.json @@ -2330,6 +2330,42 @@ "allDeclaredClasses": true, "allPublicClasses": true }, + { + "name": 
"com.google.spanner.admin.database.v1.InternalUpdateGraphOperationRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.InternalUpdateGraphOperationRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.InternalUpdateGraphOperationResponse", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.InternalUpdateGraphOperationResponse$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, { "name": "com.google.spanner.admin.database.v1.ListBackupOperationsRequest", "queryAllDeclaredConstructors": true, diff --git a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/database/v1/DatabaseAdminClient.java b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/database/v1/DatabaseAdminClient.java index 6ba935d94e8..3d831a07efa 100644 --- a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/database/v1/DatabaseAdminClient.java +++ b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/database/v1/DatabaseAdminClient.java @@ -66,6 +66,8 @@ import com.google.spanner.admin.database.v1.GetDatabaseDdlResponse; import com.google.spanner.admin.database.v1.GetDatabaseRequest; import com.google.spanner.admin.database.v1.InstanceName; +import 
com.google.spanner.admin.database.v1.InternalUpdateGraphOperationRequest; +import com.google.spanner.admin.database.v1.InternalUpdateGraphOperationResponse; import com.google.spanner.admin.database.v1.ListBackupOperationsRequest; import com.google.spanner.admin.database.v1.ListBackupOperationsResponse; import com.google.spanner.admin.database.v1.ListBackupSchedulesRequest; @@ -640,6 +642,25 @@ * * * + * + *

InternalUpdateGraphOperation + *

This is an internal API called by Spanner Graph jobs. You should never need to call this API directly. + * + *

Request object method variants only take one parameter, a request object, which must be constructed before the call.

+ * + *

"Flattened" method variants have converted the fields of the request object into function parameters to enable multiple ways to call the same method.

+ * + *

Callable method variants take no parameters and return an immutable API callable object, which can be used to initiate calls to the service.

+ * + * + * * * *

See the individual methods for example code. @@ -5119,6 +5140,146 @@ public final ListBackupSchedulesPagedResponse listBackupSchedules( return stub.listBackupSchedulesCallable(); } + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * This is an internal API called by Spanner Graph jobs. You should never need to call this API + * directly. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) {
+   *   DatabaseName database = DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]");
+   *   String operationId = "operationId129704162";
+   *   InternalUpdateGraphOperationResponse response =
+   *       databaseAdminClient.internalUpdateGraphOperation(database, operationId);
+   * }
+   * }
+ * + * @param database Internal field, do not use directly. + * @param operationId Internal field, do not use directly. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final InternalUpdateGraphOperationResponse internalUpdateGraphOperation( + DatabaseName database, String operationId) { + InternalUpdateGraphOperationRequest request = + InternalUpdateGraphOperationRequest.newBuilder() + .setDatabase(database == null ? null : database.toString()) + .setOperationId(operationId) + .build(); + return internalUpdateGraphOperation(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * This is an internal API called by Spanner Graph jobs. You should never need to call this API + * directly. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) {
+   *   String database = DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]").toString();
+   *   String operationId = "operationId129704162";
+   *   InternalUpdateGraphOperationResponse response =
+   *       databaseAdminClient.internalUpdateGraphOperation(database, operationId);
+   * }
+   * }
+ * + * @param database Internal field, do not use directly. + * @param operationId Internal field, do not use directly. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final InternalUpdateGraphOperationResponse internalUpdateGraphOperation( + String database, String operationId) { + InternalUpdateGraphOperationRequest request = + InternalUpdateGraphOperationRequest.newBuilder() + .setDatabase(database) + .setOperationId(operationId) + .build(); + return internalUpdateGraphOperation(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * This is an internal API called by Spanner Graph jobs. You should never need to call this API + * directly. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) {
+   *   InternalUpdateGraphOperationRequest request =
+   *       InternalUpdateGraphOperationRequest.newBuilder()
+   *           .setDatabase(DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]").toString())
+   *           .setOperationId("operationId129704162")
+   *           .setVmIdentityToken("vmIdentityToken-417652124")
+   *           .setProgress(-1001078227)
+   *           .setStatus(Status.newBuilder().build())
+   *           .build();
+   *   InternalUpdateGraphOperationResponse response =
+   *       databaseAdminClient.internalUpdateGraphOperation(request);
+   * }
+   * }
+ * + * @param request The request object containing all of the parameters for the API call. + * @throws com.google.api.gax.rpc.ApiException if the remote call fails + */ + public final InternalUpdateGraphOperationResponse internalUpdateGraphOperation( + InternalUpdateGraphOperationRequest request) { + return internalUpdateGraphOperationCallable().call(request); + } + + // AUTO-GENERATED DOCUMENTATION AND METHOD. + /** + * This is an internal API called by Spanner Graph jobs. You should never need to call this API + * directly. + * + *

Sample code: + * + *

{@code
+   * // This snippet has been automatically generated and should be regarded as a code template only.
+   * // It will require modifications to work:
+   * // - It may require correct/in-range values for request initialization.
+   * // - It may require specifying regional endpoints when creating the service client as shown in
+   * // https://cloud.google.com/java/docs/setup#configure_endpoints_for_the_client_library
+   * try (DatabaseAdminClient databaseAdminClient = DatabaseAdminClient.create()) {
+   *   InternalUpdateGraphOperationRequest request =
+   *       InternalUpdateGraphOperationRequest.newBuilder()
+   *           .setDatabase(DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]").toString())
+   *           .setOperationId("operationId129704162")
+   *           .setVmIdentityToken("vmIdentityToken-417652124")
+   *           .setProgress(-1001078227)
+   *           .setStatus(Status.newBuilder().build())
+   *           .build();
+   *   ApiFuture future =
+   *       databaseAdminClient.internalUpdateGraphOperationCallable().futureCall(request);
+   *   // Do something.
+   *   InternalUpdateGraphOperationResponse response = future.get();
+   * }
+   * }
+ */ + public final UnaryCallable< + InternalUpdateGraphOperationRequest, InternalUpdateGraphOperationResponse> + internalUpdateGraphOperationCallable() { + return stub.internalUpdateGraphOperationCallable(); + } + @Override public final void close() { stub.close(); diff --git a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/database/v1/DatabaseAdminSettings.java b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/database/v1/DatabaseAdminSettings.java index 399dcbf38c8..8d49eec27fa 100644 --- a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/database/v1/DatabaseAdminSettings.java +++ b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/database/v1/DatabaseAdminSettings.java @@ -64,6 +64,8 @@ import com.google.spanner.admin.database.v1.GetDatabaseDdlRequest; import com.google.spanner.admin.database.v1.GetDatabaseDdlResponse; import com.google.spanner.admin.database.v1.GetDatabaseRequest; +import com.google.spanner.admin.database.v1.InternalUpdateGraphOperationRequest; +import com.google.spanner.admin.database.v1.InternalUpdateGraphOperationResponse; import com.google.spanner.admin.database.v1.ListBackupOperationsRequest; import com.google.spanner.admin.database.v1.ListBackupOperationsResponse; import com.google.spanner.admin.database.v1.ListBackupSchedulesRequest; @@ -348,6 +350,13 @@ public UnaryCallSettings deleteBackupSchedul return ((DatabaseAdminStubSettings) getStubSettings()).listBackupSchedulesSettings(); } + /** Returns the object with the settings used for calls to internalUpdateGraph. 
*/ + public UnaryCallSettings< + InternalUpdateGraphOperationRequest, InternalUpdateGraphOperationResponse> + internalUpdateGraphOperationSettings() { + return ((DatabaseAdminStubSettings) getStubSettings()).internalUpdateGraphOperationSettings(); + } + public static final DatabaseAdminSettings create(DatabaseAdminStubSettings stub) throws IOException { return new DatabaseAdminSettings.Builder(stub.toBuilder()).build(); @@ -652,6 +661,13 @@ public UnaryCallSettings.Builder restoreDatab return getStubSettingsBuilder().listBackupSchedulesSettings(); } + /** Returns the builder for the settings used for calls to internalUpdateGraph. */ + public UnaryCallSettings.Builder< + InternalUpdateGraphOperationRequest, InternalUpdateGraphOperationResponse> + internalUpdateGraphOperationSettings() { + return getStubSettingsBuilder().internalUpdateGraphOperationSettings(); + } + @Override public DatabaseAdminSettings build() throws IOException { return new DatabaseAdminSettings(this); diff --git a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/database/v1/gapic_metadata.json b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/database/v1/gapic_metadata.json index 96dc31e91d7..f6bcf8dda65 100644 --- a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/database/v1/gapic_metadata.json +++ b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/database/v1/gapic_metadata.json @@ -49,6 +49,9 @@ "GetIamPolicy": { "methods": ["getIamPolicy", "getIamPolicy", "getIamPolicy", "getIamPolicyCallable"] }, + "InternalUpdateGraphOperation": { + "methods": ["internalUpdateGraphOperation", "internalUpdateGraphOperation", "internalUpdateGraphOperation", "internalUpdateGraphOperationCallable"] + }, "ListBackupOperations": { "methods": ["listBackupOperations", "listBackupOperations", "listBackupOperations", "listBackupOperationsPagedCallable", "listBackupOperationsCallable"] }, diff --git 
a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/database/v1/stub/DatabaseAdminStub.java b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/database/v1/stub/DatabaseAdminStub.java index 7926008bab3..274b05f78fe 100644 --- a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/database/v1/stub/DatabaseAdminStub.java +++ b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/database/v1/stub/DatabaseAdminStub.java @@ -54,6 +54,8 @@ import com.google.spanner.admin.database.v1.GetDatabaseDdlRequest; import com.google.spanner.admin.database.v1.GetDatabaseDdlResponse; import com.google.spanner.admin.database.v1.GetDatabaseRequest; +import com.google.spanner.admin.database.v1.InternalUpdateGraphOperationRequest; +import com.google.spanner.admin.database.v1.InternalUpdateGraphOperationResponse; import com.google.spanner.admin.database.v1.ListBackupOperationsRequest; import com.google.spanner.admin.database.v1.ListBackupOperationsResponse; import com.google.spanner.admin.database.v1.ListBackupSchedulesRequest; @@ -263,6 +265,12 @@ public UnaryCallable deleteBackupScheduleCal throw new UnsupportedOperationException("Not implemented: listBackupSchedulesCallable()"); } + public UnaryCallable + internalUpdateGraphOperationCallable() { + throw new UnsupportedOperationException( + "Not implemented: internalUpdateGraphOperationCallable()"); + } + @Override public abstract void close(); } diff --git a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/database/v1/stub/DatabaseAdminStubSettings.java b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/database/v1/stub/DatabaseAdminStubSettings.java index b2624c5a9a9..fa7dc6d88b6 100644 --- a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/database/v1/stub/DatabaseAdminStubSettings.java +++ b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/database/v1/stub/DatabaseAdminStubSettings.java @@ -85,6 +85,8 @@ 
import com.google.spanner.admin.database.v1.GetDatabaseDdlRequest; import com.google.spanner.admin.database.v1.GetDatabaseDdlResponse; import com.google.spanner.admin.database.v1.GetDatabaseRequest; +import com.google.spanner.admin.database.v1.InternalUpdateGraphOperationRequest; +import com.google.spanner.admin.database.v1.InternalUpdateGraphOperationResponse; import com.google.spanner.admin.database.v1.ListBackupOperationsRequest; import com.google.spanner.admin.database.v1.ListBackupOperationsResponse; import com.google.spanner.admin.database.v1.ListBackupSchedulesRequest; @@ -254,6 +256,9 @@ public class DatabaseAdminStubSettings extends StubSettings listBackupSchedulesSettings; + private final UnaryCallSettings< + InternalUpdateGraphOperationRequest, InternalUpdateGraphOperationResponse> + internalUpdateGraphOperationSettings; private static final PagedListDescriptor LIST_DATABASES_PAGE_STR_DESC = @@ -783,6 +788,13 @@ public UnaryCallSettings deleteBackupSchedul return listBackupSchedulesSettings; } + /** Returns the object with the settings used for calls to internalUpdateGraph. */ + public UnaryCallSettings< + InternalUpdateGraphOperationRequest, InternalUpdateGraphOperationResponse> + internalUpdateGraphOperationSettings() { + return internalUpdateGraphOperationSettings; + } + public DatabaseAdminStub createStub() throws IOException { if (getTransportChannelProvider() .getTransportName() @@ -927,6 +939,8 @@ protected DatabaseAdminStubSettings(Builder settingsBuilder) throws IOException updateBackupScheduleSettings = settingsBuilder.updateBackupScheduleSettings().build(); deleteBackupScheduleSettings = settingsBuilder.deleteBackupScheduleSettings().build(); listBackupSchedulesSettings = settingsBuilder.listBackupSchedulesSettings().build(); + internalUpdateGraphOperationSettings = + settingsBuilder.internalUpdateGraphOperationSettings().build(); } /** Builder for DatabaseAdminStubSettings. 
*/ @@ -1003,6 +1017,9 @@ public static class Builder extends StubSettings.Builder listBackupSchedulesSettings; + private final UnaryCallSettings.Builder< + InternalUpdateGraphOperationRequest, InternalUpdateGraphOperationResponse> + internalUpdateGraphOperationSettings; private static final ImmutableMap> RETRYABLE_CODE_DEFINITIONS; @@ -1023,6 +1040,7 @@ public static class Builder extends StubSettings.BuildernewArrayList( StatusCode.Code.UNAVAILABLE, StatusCode.Code.DEADLINE_EXCEEDED))); + definitions.put("no_retry_codes", ImmutableSet.copyOf(Lists.newArrayList())); RETRYABLE_CODE_DEFINITIONS = definitions.build(); } @@ -1069,6 +1087,8 @@ public static class Builder extends StubSettings.Builder>of( @@ -1142,7 +1163,8 @@ protected Builder(ClientContext clientContext) { getBackupScheduleSettings, updateBackupScheduleSettings, deleteBackupScheduleSettings, - listBackupSchedulesSettings); + listBackupSchedulesSettings, + internalUpdateGraphOperationSettings); initDefaults(this); } @@ -1181,6 +1203,8 @@ protected Builder(DatabaseAdminStubSettings settings) { updateBackupScheduleSettings = settings.updateBackupScheduleSettings.toBuilder(); deleteBackupScheduleSettings = settings.deleteBackupScheduleSettings.toBuilder(); listBackupSchedulesSettings = settings.listBackupSchedulesSettings.toBuilder(); + internalUpdateGraphOperationSettings = + settings.internalUpdateGraphOperationSettings.toBuilder(); unaryMethodSettingsBuilders = ImmutableList.>of( @@ -1209,7 +1233,8 @@ protected Builder(DatabaseAdminStubSettings settings) { getBackupScheduleSettings, updateBackupScheduleSettings, deleteBackupScheduleSettings, - listBackupSchedulesSettings); + listBackupSchedulesSettings, + internalUpdateGraphOperationSettings); } private static Builder createDefault() { @@ -1367,6 +1392,11 @@ private static Builder initDefaults(Builder builder) { .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_0_codes")) 
.setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_0_params")); + builder + .internalUpdateGraphOperationSettings() + .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("no_retry_codes")) + .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("no_retry_params")); + builder .createDatabaseOperationSettings() .setInitialCallSettings( @@ -1721,6 +1751,13 @@ public UnaryCallSettings.Builder restoreDatab return listBackupSchedulesSettings; } + /** Returns the builder for the settings used for calls to internalUpdateGraph. */ + public UnaryCallSettings.Builder< + InternalUpdateGraphOperationRequest, InternalUpdateGraphOperationResponse> + internalUpdateGraphOperationSettings() { + return internalUpdateGraphOperationSettings; + } + @Override public DatabaseAdminStubSettings build() throws IOException { return new DatabaseAdminStubSettings(this); diff --git a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/database/v1/stub/GrpcDatabaseAdminStub.java b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/database/v1/stub/GrpcDatabaseAdminStub.java index 9e6270f3c24..62987205a35 100644 --- a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/database/v1/stub/GrpcDatabaseAdminStub.java +++ b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/database/v1/stub/GrpcDatabaseAdminStub.java @@ -59,6 +59,8 @@ import com.google.spanner.admin.database.v1.GetDatabaseDdlRequest; import com.google.spanner.admin.database.v1.GetDatabaseDdlResponse; import com.google.spanner.admin.database.v1.GetDatabaseRequest; +import com.google.spanner.admin.database.v1.InternalUpdateGraphOperationRequest; +import com.google.spanner.admin.database.v1.InternalUpdateGraphOperationResponse; import com.google.spanner.admin.database.v1.ListBackupOperationsRequest; import com.google.spanner.admin.database.v1.ListBackupOperationsResponse; import com.google.spanner.admin.database.v1.ListBackupSchedulesRequest; @@ -353,6 +355,21 @@ public class 
GrpcDatabaseAdminStub extends DatabaseAdminStub { ProtoUtils.marshaller(ListBackupSchedulesResponse.getDefaultInstance())) .build(); + private static final MethodDescriptor< + InternalUpdateGraphOperationRequest, InternalUpdateGraphOperationResponse> + internalUpdateGraphOperationMethodDescriptor = + MethodDescriptor + . + newBuilder() + .setType(MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + "google.spanner.admin.database.v1.DatabaseAdmin/InternalUpdateGraphOperation") + .setRequestMarshaller( + ProtoUtils.marshaller(InternalUpdateGraphOperationRequest.getDefaultInstance())) + .setResponseMarshaller( + ProtoUtils.marshaller(InternalUpdateGraphOperationResponse.getDefaultInstance())) + .build(); + private final UnaryCallable listDatabasesCallable; private final UnaryCallable listDatabasesPagedCallable; @@ -410,6 +427,9 @@ public class GrpcDatabaseAdminStub extends DatabaseAdminStub { listBackupSchedulesCallable; private final UnaryCallable listBackupSchedulesPagedCallable; + private final UnaryCallable< + InternalUpdateGraphOperationRequest, InternalUpdateGraphOperationResponse> + internalUpdateGraphOperationCallable; private final BackgroundResource backgroundResources; private final GrpcOperationsStub operationsStub; @@ -725,6 +745,13 @@ protected GrpcDatabaseAdminStub( return builder.build(); }) .build(); + GrpcCallSettings + internalUpdateGraphOperationTransportSettings = + GrpcCallSettings + . 
+ newBuilder() + .setMethodDescriptor(internalUpdateGraphOperationMethodDescriptor) + .build(); this.listDatabasesCallable = callableFactory.createUnaryCallable( @@ -886,6 +913,11 @@ protected GrpcDatabaseAdminStub( listBackupSchedulesTransportSettings, settings.listBackupSchedulesSettings(), clientContext); + this.internalUpdateGraphOperationCallable = + callableFactory.createUnaryCallable( + internalUpdateGraphOperationTransportSettings, + settings.internalUpdateGraphOperationSettings(), + clientContext); this.backgroundResources = new BackgroundResourceAggregation(clientContext.getBackgroundResources()); @@ -1101,6 +1133,12 @@ public UnaryCallable deleteBackupScheduleCal return listBackupSchedulesPagedCallable; } + @Override + public UnaryCallable + internalUpdateGraphOperationCallable() { + return internalUpdateGraphOperationCallable; + } + @Override public final void close() { try { diff --git a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/database/v1/stub/HttpJsonDatabaseAdminStub.java b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/database/v1/stub/HttpJsonDatabaseAdminStub.java index 038c51b144e..cfce70b7c8a 100644 --- a/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/database/v1/stub/HttpJsonDatabaseAdminStub.java +++ b/google-cloud-spanner/src/main/java/com/google/cloud/spanner/admin/database/v1/stub/HttpJsonDatabaseAdminStub.java @@ -68,6 +68,8 @@ import com.google.spanner.admin.database.v1.GetDatabaseDdlRequest; import com.google.spanner.admin.database.v1.GetDatabaseDdlResponse; import com.google.spanner.admin.database.v1.GetDatabaseRequest; +import com.google.spanner.admin.database.v1.InternalUpdateGraphOperationRequest; +import com.google.spanner.admin.database.v1.InternalUpdateGraphOperationResponse; import com.google.spanner.admin.database.v1.ListBackupOperationsRequest; import com.google.spanner.admin.database.v1.ListBackupOperationsResponse; import 
com.google.spanner.admin.database.v1.ListBackupSchedulesRequest; @@ -1996,6 +1998,14 @@ public UnaryCallable deleteBackupScheduleCal return listBackupSchedulesPagedCallable; } + @Override + public UnaryCallable + internalUpdateGraphOperationCallable() { + throw new UnsupportedOperationException( + "Not implemented: internalUpdateGraphOperationCallable(). REST transport is not implemented" + + " for this method yet."); + } + @Override public final void close() { try { diff --git a/google-cloud-spanner/src/main/resources/META-INF/native-image/com.google.cloud.spanner.admin.database.v1/reflect-config.json b/google-cloud-spanner/src/main/resources/META-INF/native-image/com.google.cloud.spanner.admin.database.v1/reflect-config.json index 0d1524fb146..9eff2b915a3 100644 --- a/google-cloud-spanner/src/main/resources/META-INF/native-image/com.google.cloud.spanner.admin.database.v1/reflect-config.json +++ b/google-cloud-spanner/src/main/resources/META-INF/native-image/com.google.cloud.spanner.admin.database.v1/reflect-config.json @@ -2330,6 +2330,42 @@ "allDeclaredClasses": true, "allPublicClasses": true }, + { + "name": "com.google.spanner.admin.database.v1.InternalUpdateGraphOperationRequest", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.InternalUpdateGraphOperationRequest$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, + { + "name": "com.google.spanner.admin.database.v1.InternalUpdateGraphOperationResponse", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true 
+ }, + { + "name": "com.google.spanner.admin.database.v1.InternalUpdateGraphOperationResponse$Builder", + "queryAllDeclaredConstructors": true, + "queryAllPublicConstructors": true, + "queryAllDeclaredMethods": true, + "allPublicMethods": true, + "allDeclaredClasses": true, + "allPublicClasses": true + }, { "name": "com.google.spanner.admin.database.v1.ListBackupOperationsRequest", "queryAllDeclaredConstructors": true, diff --git a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/admin/database/v1/DatabaseAdminClientHttpJsonTest.java b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/admin/database/v1/DatabaseAdminClientHttpJsonTest.java index 26826b4dd8d..05eeeb8fcc6 100644 --- a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/admin/database/v1/DatabaseAdminClientHttpJsonTest.java +++ b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/admin/database/v1/DatabaseAdminClientHttpJsonTest.java @@ -3021,4 +3021,10 @@ public void listBackupSchedulesExceptionTest2() throws Exception { // Expected exception. } } + + @Test + public void internalUpdateGraphOperationUnsupportedMethodTest() throws Exception { + // The internalUpdateGraphOperation() method is not supported in REST transport. + // This empty test is generated for technical reasons. 
+ } } diff --git a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/admin/database/v1/DatabaseAdminClientTest.java b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/admin/database/v1/DatabaseAdminClientTest.java index fbc20ebbd9b..a7b2a7b12f5 100644 --- a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/admin/database/v1/DatabaseAdminClientTest.java +++ b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/admin/database/v1/DatabaseAdminClientTest.java @@ -76,6 +76,8 @@ import com.google.spanner.admin.database.v1.GetDatabaseDdlResponse; import com.google.spanner.admin.database.v1.GetDatabaseRequest; import com.google.spanner.admin.database.v1.InstanceName; +import com.google.spanner.admin.database.v1.InternalUpdateGraphOperationRequest; +import com.google.spanner.admin.database.v1.InternalUpdateGraphOperationResponse; import com.google.spanner.admin.database.v1.ListBackupOperationsRequest; import com.google.spanner.admin.database.v1.ListBackupOperationsResponse; import com.google.spanner.admin.database.v1.ListBackupSchedulesRequest; @@ -2740,4 +2742,86 @@ public void listBackupSchedulesExceptionTest2() throws Exception { // Expected exception. 
} } + + @Test + public void internalUpdateGraphOperationTest() throws Exception { + InternalUpdateGraphOperationResponse expectedResponse = + InternalUpdateGraphOperationResponse.newBuilder().build(); + mockDatabaseAdmin.addResponse(expectedResponse); + + DatabaseName database = DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]"); + String operationId = "operationId129704162"; + + InternalUpdateGraphOperationResponse actualResponse = + client.internalUpdateGraphOperation(database, operationId); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockDatabaseAdmin.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + InternalUpdateGraphOperationRequest actualRequest = + ((InternalUpdateGraphOperationRequest) actualRequests.get(0)); + + Assert.assertEquals(database.toString(), actualRequest.getDatabase()); + Assert.assertEquals(operationId, actualRequest.getOperationId()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void internalUpdateGraphOperationExceptionTest() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockDatabaseAdmin.addException(exception); + + try { + DatabaseName database = DatabaseName.of("[PROJECT]", "[INSTANCE]", "[DATABASE]"); + String operationId = "operationId129704162"; + client.internalUpdateGraphOperation(database, operationId); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } + + @Test + public void internalUpdateGraphOperationTest2() throws Exception { + InternalUpdateGraphOperationResponse expectedResponse = + InternalUpdateGraphOperationResponse.newBuilder().build(); + mockDatabaseAdmin.addResponse(expectedResponse); + + String database = "database1789464955"; + String operationId = "operationId129704162"; + + InternalUpdateGraphOperationResponse actualResponse = + client.internalUpdateGraphOperation(database, operationId); + Assert.assertEquals(expectedResponse, actualResponse); + + List actualRequests = mockDatabaseAdmin.getRequests(); + Assert.assertEquals(1, actualRequests.size()); + InternalUpdateGraphOperationRequest actualRequest = + ((InternalUpdateGraphOperationRequest) actualRequests.get(0)); + + Assert.assertEquals(database, actualRequest.getDatabase()); + Assert.assertEquals(operationId, actualRequest.getOperationId()); + Assert.assertTrue( + channelProvider.isHeaderSent( + ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), + GaxGrpcProperties.getDefaultApiClientHeaderPattern())); + } + + @Test + public void internalUpdateGraphOperationExceptionTest2() throws Exception { + StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); + mockDatabaseAdmin.addException(exception); + + try { + String database = "database1789464955"; + String operationId = "operationId129704162"; + client.internalUpdateGraphOperation(database, operationId); + Assert.fail("No exception raised"); + } catch (InvalidArgumentException e) { + // Expected exception. 
+ } + } } diff --git a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/admin/database/v1/MockDatabaseAdminImpl.java b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/admin/database/v1/MockDatabaseAdminImpl.java index 71284cd0640..aeeb5f2b061 100644 --- a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/admin/database/v1/MockDatabaseAdminImpl.java +++ b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/admin/database/v1/MockDatabaseAdminImpl.java @@ -43,6 +43,8 @@ import com.google.spanner.admin.database.v1.GetDatabaseDdlRequest; import com.google.spanner.admin.database.v1.GetDatabaseDdlResponse; import com.google.spanner.admin.database.v1.GetDatabaseRequest; +import com.google.spanner.admin.database.v1.InternalUpdateGraphOperationRequest; +import com.google.spanner.admin.database.v1.InternalUpdateGraphOperationResponse; import com.google.spanner.admin.database.v1.ListBackupOperationsRequest; import com.google.spanner.admin.database.v1.ListBackupOperationsResponse; import com.google.spanner.admin.database.v1.ListBackupSchedulesRequest; @@ -646,4 +648,27 @@ public void listBackupSchedules( Exception.class.getName()))); } } + + @Override + public void internalUpdateGraphOperation( + InternalUpdateGraphOperationRequest request, + StreamObserver responseObserver) { + Object response = responses.poll(); + if (response instanceof InternalUpdateGraphOperationResponse) { + requests.add(request); + responseObserver.onNext(((InternalUpdateGraphOperationResponse) response)); + responseObserver.onCompleted(); + } else if (response instanceof Exception) { + responseObserver.onError(((Exception) response)); + } else { + responseObserver.onError( + new IllegalArgumentException( + String.format( + "Unrecognized response type %s for method InternalUpdateGraphOperation, expected" + + " %s or %s", + response == null ? 
"null" : response.getClass().getName(), + InternalUpdateGraphOperationResponse.class.getName(), + Exception.class.getName()))); + } + } } diff --git a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/v1/SpannerClientHttpJsonTest.java b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/v1/SpannerClientHttpJsonTest.java index 319fc8a2e93..236d7f9649a 100644 --- a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/v1/SpannerClientHttpJsonTest.java +++ b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/v1/SpannerClientHttpJsonTest.java @@ -927,6 +927,7 @@ public void commitTest() throws Exception { CommitResponse.newBuilder() .setCommitTimestamp(Timestamp.newBuilder().build()) .setCommitStats(CommitResponse.CommitStats.newBuilder().build()) + .setSnapshotTimestamp(Timestamp.newBuilder().build()) .build(); mockService.addResponse(expectedResponse); @@ -976,6 +977,7 @@ public void commitTest2() throws Exception { CommitResponse.newBuilder() .setCommitTimestamp(Timestamp.newBuilder().build()) .setCommitStats(CommitResponse.CommitStats.newBuilder().build()) + .setSnapshotTimestamp(Timestamp.newBuilder().build()) .build(); mockService.addResponse(expectedResponse); @@ -1025,6 +1027,7 @@ public void commitTest3() throws Exception { CommitResponse.newBuilder() .setCommitTimestamp(Timestamp.newBuilder().build()) .setCommitStats(CommitResponse.CommitStats.newBuilder().build()) + .setSnapshotTimestamp(Timestamp.newBuilder().build()) .build(); mockService.addResponse(expectedResponse); @@ -1076,6 +1079,7 @@ public void commitTest4() throws Exception { CommitResponse.newBuilder() .setCommitTimestamp(Timestamp.newBuilder().build()) .setCommitStats(CommitResponse.CommitStats.newBuilder().build()) + .setSnapshotTimestamp(Timestamp.newBuilder().build()) .build(); mockService.addResponse(expectedResponse); diff --git a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/v1/SpannerClientTest.java 
b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/v1/SpannerClientTest.java index a8d7f7962a6..35ed41c0d86 100644 --- a/google-cloud-spanner/src/test/java/com/google/cloud/spanner/v1/SpannerClientTest.java +++ b/google-cloud-spanner/src/test/java/com/google/cloud/spanner/v1/SpannerClientTest.java @@ -1022,6 +1022,7 @@ public void commitTest() throws Exception { CommitResponse.newBuilder() .setCommitTimestamp(Timestamp.newBuilder().build()) .setCommitStats(CommitResponse.CommitStats.newBuilder().build()) + .setSnapshotTimestamp(Timestamp.newBuilder().build()) .build(); mockSpanner.addResponse(expectedResponse); @@ -1067,6 +1068,7 @@ public void commitTest2() throws Exception { CommitResponse.newBuilder() .setCommitTimestamp(Timestamp.newBuilder().build()) .setCommitStats(CommitResponse.CommitStats.newBuilder().build()) + .setSnapshotTimestamp(Timestamp.newBuilder().build()) .build(); mockSpanner.addResponse(expectedResponse); @@ -1112,6 +1114,7 @@ public void commitTest3() throws Exception { CommitResponse.newBuilder() .setCommitTimestamp(Timestamp.newBuilder().build()) .setCommitStats(CommitResponse.CommitStats.newBuilder().build()) + .setSnapshotTimestamp(Timestamp.newBuilder().build()) .build(); mockSpanner.addResponse(expectedResponse); @@ -1157,6 +1160,7 @@ public void commitTest4() throws Exception { CommitResponse.newBuilder() .setCommitTimestamp(Timestamp.newBuilder().build()) .setCommitStats(CommitResponse.CommitStats.newBuilder().build()) + .setSnapshotTimestamp(Timestamp.newBuilder().build()) .build(); mockSpanner.addResponse(expectedResponse); diff --git a/grpc-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/DatabaseAdminGrpc.java b/grpc-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/DatabaseAdminGrpc.java index 5b6090743d3..1a543ae634f 100644 --- 
a/grpc-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/DatabaseAdminGrpc.java +++ b/grpc-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/DatabaseAdminGrpc.java @@ -1232,6 +1232,59 @@ private DatabaseAdminGrpc() {} return getListBackupSchedulesMethod; } + private static volatile io.grpc.MethodDescriptor< + com.google.spanner.admin.database.v1.InternalUpdateGraphOperationRequest, + com.google.spanner.admin.database.v1.InternalUpdateGraphOperationResponse> + getInternalUpdateGraphOperationMethod; + + @io.grpc.stub.annotations.RpcMethod( + fullMethodName = SERVICE_NAME + '/' + "InternalUpdateGraphOperation", + requestType = com.google.spanner.admin.database.v1.InternalUpdateGraphOperationRequest.class, + responseType = + com.google.spanner.admin.database.v1.InternalUpdateGraphOperationResponse.class, + methodType = io.grpc.MethodDescriptor.MethodType.UNARY) + public static io.grpc.MethodDescriptor< + com.google.spanner.admin.database.v1.InternalUpdateGraphOperationRequest, + com.google.spanner.admin.database.v1.InternalUpdateGraphOperationResponse> + getInternalUpdateGraphOperationMethod() { + io.grpc.MethodDescriptor< + com.google.spanner.admin.database.v1.InternalUpdateGraphOperationRequest, + com.google.spanner.admin.database.v1.InternalUpdateGraphOperationResponse> + getInternalUpdateGraphOperationMethod; + if ((getInternalUpdateGraphOperationMethod = + DatabaseAdminGrpc.getInternalUpdateGraphOperationMethod) + == null) { + synchronized (DatabaseAdminGrpc.class) { + if ((getInternalUpdateGraphOperationMethod = + DatabaseAdminGrpc.getInternalUpdateGraphOperationMethod) + == null) { + DatabaseAdminGrpc.getInternalUpdateGraphOperationMethod = + getInternalUpdateGraphOperationMethod = + io.grpc.MethodDescriptor + . 
+ newBuilder() + .setType(io.grpc.MethodDescriptor.MethodType.UNARY) + .setFullMethodName( + generateFullMethodName(SERVICE_NAME, "InternalUpdateGraphOperation")) + .setSampledToLocalTracing(true) + .setRequestMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.spanner.admin.database.v1 + .InternalUpdateGraphOperationRequest.getDefaultInstance())) + .setResponseMarshaller( + io.grpc.protobuf.ProtoUtils.marshaller( + com.google.spanner.admin.database.v1 + .InternalUpdateGraphOperationResponse.getDefaultInstance())) + .setSchemaDescriptor( + new DatabaseAdminMethodDescriptorSupplier("InternalUpdateGraphOperation")) + .build(); + } + } + } + return getInternalUpdateGraphOperationMethod; + } + /** Creates a new async stub that supports all call types for the service */ public static DatabaseAdminStub newStub(io.grpc.Channel channel) { io.grpc.stub.AbstractStub.StubFactory factory = @@ -1806,6 +1859,23 @@ default void listBackupSchedules( io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( getListBackupSchedulesMethod(), responseObserver); } + + /** + * + * + *
+     * This is an internal API called by Spanner Graph jobs. You should never need
+     * to call this API directly.
+     * 
+ */ + default void internalUpdateGraphOperation( + com.google.spanner.admin.database.v1.InternalUpdateGraphOperationRequest request, + io.grpc.stub.StreamObserver< + com.google.spanner.admin.database.v1.InternalUpdateGraphOperationResponse> + responseObserver) { + io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( + getInternalUpdateGraphOperationMethod(), responseObserver); + } } /** @@ -2408,6 +2478,25 @@ public void listBackupSchedules( request, responseObserver); } + + /** + * + * + *
+     * This is an internal API called by Spanner Graph jobs. You should never need
+     * to call this API directly.
+     * 
+ */ + public void internalUpdateGraphOperation( + com.google.spanner.admin.database.v1.InternalUpdateGraphOperationRequest request, + io.grpc.stub.StreamObserver< + com.google.spanner.admin.database.v1.InternalUpdateGraphOperationResponse> + responseObserver) { + io.grpc.stub.ClientCalls.asyncUnaryCall( + getChannel().newCall(getInternalUpdateGraphOperationMethod(), getCallOptions()), + request, + responseObserver); + } } /** @@ -2899,6 +2988,21 @@ public com.google.spanner.admin.database.v1.ListBackupSchedulesResponse listBack return io.grpc.stub.ClientCalls.blockingUnaryCall( getChannel(), getListBackupSchedulesMethod(), getCallOptions(), request); } + + /** + * + * + *
+     * This is an internal API called by Spanner Graph jobs. You should never need
+     * to call this API directly.
+     * 
+ */ + public com.google.spanner.admin.database.v1.InternalUpdateGraphOperationResponse + internalUpdateGraphOperation( + com.google.spanner.admin.database.v1.InternalUpdateGraphOperationRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getInternalUpdateGraphOperationMethod(), getCallOptions(), request); + } } /** @@ -3390,6 +3494,21 @@ public com.google.spanner.admin.database.v1.ListBackupSchedulesResponse listBack return io.grpc.stub.ClientCalls.blockingUnaryCall( getChannel(), getListBackupSchedulesMethod(), getCallOptions(), request); } + + /** + * + * + *
+     * This is an internal API called by Spanner Graph jobs. You should never need
+     * to call this API directly.
+     * 
+ */ + public com.google.spanner.admin.database.v1.InternalUpdateGraphOperationResponse + internalUpdateGraphOperation( + com.google.spanner.admin.database.v1.InternalUpdateGraphOperationRequest request) { + return io.grpc.stub.ClientCalls.blockingUnaryCall( + getChannel(), getInternalUpdateGraphOperationMethod(), getCallOptions(), request); + } } /** @@ -3903,6 +4022,22 @@ protected DatabaseAdminFutureStub build( return io.grpc.stub.ClientCalls.futureUnaryCall( getChannel().newCall(getListBackupSchedulesMethod(), getCallOptions()), request); } + + /** + * + * + *
+     * This is an internal API called by Spanner Graph jobs. You should never need
+     * to call this API directly.
+     * 
+ */ + public com.google.common.util.concurrent.ListenableFuture< + com.google.spanner.admin.database.v1.InternalUpdateGraphOperationResponse> + internalUpdateGraphOperation( + com.google.spanner.admin.database.v1.InternalUpdateGraphOperationRequest request) { + return io.grpc.stub.ClientCalls.futureUnaryCall( + getChannel().newCall(getInternalUpdateGraphOperationMethod(), getCallOptions()), request); + } } private static final int METHODID_LIST_DATABASES = 0; @@ -3931,6 +4066,7 @@ protected DatabaseAdminFutureStub build( private static final int METHODID_UPDATE_BACKUP_SCHEDULE = 23; private static final int METHODID_DELETE_BACKUP_SCHEDULE = 24; private static final int METHODID_LIST_BACKUP_SCHEDULES = 25; + private static final int METHODID_INTERNAL_UPDATE_GRAPH_OPERATION = 26; private static final class MethodHandlers implements io.grpc.stub.ServerCalls.UnaryMethod, @@ -4102,6 +4238,13 @@ public void invoke(Req request, io.grpc.stub.StreamObserver responseObserv com.google.spanner.admin.database.v1.ListBackupSchedulesResponse>) responseObserver); break; + case METHODID_INTERNAL_UPDATE_GRAPH_OPERATION: + serviceImpl.internalUpdateGraphOperation( + (com.google.spanner.admin.database.v1.InternalUpdateGraphOperationRequest) request, + (io.grpc.stub.StreamObserver< + com.google.spanner.admin.database.v1.InternalUpdateGraphOperationResponse>) + responseObserver); + break; default: throw new AssertionError(); } @@ -4286,6 +4429,13 @@ public static final io.grpc.ServerServiceDefinition bindService(AsyncService ser com.google.spanner.admin.database.v1.ListBackupSchedulesRequest, com.google.spanner.admin.database.v1.ListBackupSchedulesResponse>( service, METHODID_LIST_BACKUP_SCHEDULES))) + .addMethod( + getInternalUpdateGraphOperationMethod(), + io.grpc.stub.ServerCalls.asyncUnaryCall( + new MethodHandlers< + com.google.spanner.admin.database.v1.InternalUpdateGraphOperationRequest, + com.google.spanner.admin.database.v1.InternalUpdateGraphOperationResponse>( + service, 
METHODID_INTERNAL_UPDATE_GRAPH_OPERATION))) .build(); } @@ -4363,6 +4513,7 @@ public static io.grpc.ServiceDescriptor getServiceDescriptor() { .addMethod(getUpdateBackupScheduleMethod()) .addMethod(getDeleteBackupScheduleMethod()) .addMethod(getListBackupSchedulesMethod()) + .addMethod(getInternalUpdateGraphOperationMethod()) .build(); } } diff --git a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/InternalUpdateGraphOperationRequest.java b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/InternalUpdateGraphOperationRequest.java new file mode 100644 index 00000000000..a16bdbbfbf4 --- /dev/null +++ b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/InternalUpdateGraphOperationRequest.java @@ -0,0 +1,1424 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/spanner/admin/database/v1/spanner_database_admin.proto + +// Protobuf Java Version: 3.25.8 +package com.google.spanner.admin.database.v1; + +/** + * + * + *
+ * Internal request proto, do not use directly.
+ * 
+ * + * Protobuf type {@code google.spanner.admin.database.v1.InternalUpdateGraphOperationRequest} + */ +public final class InternalUpdateGraphOperationRequest + extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.spanner.admin.database.v1.InternalUpdateGraphOperationRequest) + InternalUpdateGraphOperationRequestOrBuilder { + private static final long serialVersionUID = 0L; + + // Use InternalUpdateGraphOperationRequest.newBuilder() to construct. + private InternalUpdateGraphOperationRequest( + com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private InternalUpdateGraphOperationRequest() { + database_ = ""; + operationId_ = ""; + vmIdentityToken_ = ""; + } + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new InternalUpdateGraphOperationRequest(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_InternalUpdateGraphOperationRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_InternalUpdateGraphOperationRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.database.v1.InternalUpdateGraphOperationRequest.class, + com.google.spanner.admin.database.v1.InternalUpdateGraphOperationRequest.Builder.class); + } + + private int bitField0_; + public static final int DATABASE_FIELD_NUMBER = 1; + + @SuppressWarnings("serial") + private volatile java.lang.Object database_ = ""; + + /** + * + * + *
+   * Internal field, do not use directly.
+   * 
+ * + * + * string database = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The database. + */ + @java.lang.Override + public java.lang.String getDatabase() { + java.lang.Object ref = database_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + database_ = s; + return s; + } + } + + /** + * + * + *
+   * Internal field, do not use directly.
+   * 
+ * + * + * string database = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for database. + */ + @java.lang.Override + public com.google.protobuf.ByteString getDatabaseBytes() { + java.lang.Object ref = database_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + database_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int OPERATION_ID_FIELD_NUMBER = 2; + + @SuppressWarnings("serial") + private volatile java.lang.Object operationId_ = ""; + + /** + * + * + *
+   * Internal field, do not use directly.
+   * 
+ * + * string operation_id = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The operationId. + */ + @java.lang.Override + public java.lang.String getOperationId() { + java.lang.Object ref = operationId_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + operationId_ = s; + return s; + } + } + + /** + * + * + *
+   * Internal field, do not use directly.
+   * 
+ * + * string operation_id = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for operationId. + */ + @java.lang.Override + public com.google.protobuf.ByteString getOperationIdBytes() { + java.lang.Object ref = operationId_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + operationId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int VM_IDENTITY_TOKEN_FIELD_NUMBER = 5; + + @SuppressWarnings("serial") + private volatile java.lang.Object vmIdentityToken_ = ""; + + /** + * + * + *
+   * Internal field, do not use directly.
+   * 
+ * + * string vm_identity_token = 5 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The vmIdentityToken. + */ + @java.lang.Override + public java.lang.String getVmIdentityToken() { + java.lang.Object ref = vmIdentityToken_; + if (ref instanceof java.lang.String) { + return (java.lang.String) ref; + } else { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + vmIdentityToken_ = s; + return s; + } + } + + /** + * + * + *
+   * Internal field, do not use directly.
+   * 
+ * + * string vm_identity_token = 5 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for vmIdentityToken. + */ + @java.lang.Override + public com.google.protobuf.ByteString getVmIdentityTokenBytes() { + java.lang.Object ref = vmIdentityToken_; + if (ref instanceof java.lang.String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + vmIdentityToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + public static final int PROGRESS_FIELD_NUMBER = 3; + private double progress_ = 0D; + + /** + * + * + *
+   * Internal field, do not use directly.
+   * 
+ * + * double progress = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The progress. + */ + @java.lang.Override + public double getProgress() { + return progress_; + } + + public static final int STATUS_FIELD_NUMBER = 6; + private com.google.rpc.Status status_; + + /** + * + * + *
+   * Internal field, do not use directly.
+   * 
+ * + * .google.rpc.Status status = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return Whether the status field is set. + */ + @java.lang.Override + public boolean hasStatus() { + return ((bitField0_ & 0x00000001) != 0); + } + + /** + * + * + *
+   * Internal field, do not use directly.
+   * 
+ * + * .google.rpc.Status status = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The status. + */ + @java.lang.Override + public com.google.rpc.Status getStatus() { + return status_ == null ? com.google.rpc.Status.getDefaultInstance() : status_; + } + + /** + * + * + *
+   * Internal field, do not use directly.
+   * 
+ * + * .google.rpc.Status status = 6 [(.google.api.field_behavior) = OPTIONAL]; + */ + @java.lang.Override + public com.google.rpc.StatusOrBuilder getStatusOrBuilder() { + return status_ == null ? com.google.rpc.Status.getDefaultInstance() : status_; + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(database_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 1, database_); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(operationId_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 2, operationId_); + } + if (java.lang.Double.doubleToRawLongBits(progress_) != 0) { + output.writeDouble(3, progress_); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(vmIdentityToken_)) { + com.google.protobuf.GeneratedMessageV3.writeString(output, 5, vmIdentityToken_); + } + if (((bitField0_ & 0x00000001) != 0)) { + output.writeMessage(6, getStatus()); + } + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(database_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, database_); + } + if (!com.google.protobuf.GeneratedMessageV3.isStringEmpty(operationId_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(2, operationId_); + } + if (java.lang.Double.doubleToRawLongBits(progress_) != 0) { + size += com.google.protobuf.CodedOutputStream.computeDoubleSize(3, progress_); + } + if 
(!com.google.protobuf.GeneratedMessageV3.isStringEmpty(vmIdentityToken_)) { + size += com.google.protobuf.GeneratedMessageV3.computeStringSize(5, vmIdentityToken_); + } + if (((bitField0_ & 0x00000001) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(6, getStatus()); + } + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj + instanceof com.google.spanner.admin.database.v1.InternalUpdateGraphOperationRequest)) { + return super.equals(obj); + } + com.google.spanner.admin.database.v1.InternalUpdateGraphOperationRequest other = + (com.google.spanner.admin.database.v1.InternalUpdateGraphOperationRequest) obj; + + if (!getDatabase().equals(other.getDatabase())) return false; + if (!getOperationId().equals(other.getOperationId())) return false; + if (!getVmIdentityToken().equals(other.getVmIdentityToken())) return false; + if (java.lang.Double.doubleToLongBits(getProgress()) + != java.lang.Double.doubleToLongBits(other.getProgress())) return false; + if (hasStatus() != other.hasStatus()) return false; + if (hasStatus()) { + if (!getStatus().equals(other.getStatus())) return false; + } + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (37 * hash) + DATABASE_FIELD_NUMBER; + hash = (53 * hash) + getDatabase().hashCode(); + hash = (37 * hash) + OPERATION_ID_FIELD_NUMBER; + hash = (53 * hash) + getOperationId().hashCode(); + hash = (37 * hash) + VM_IDENTITY_TOKEN_FIELD_NUMBER; + hash = (53 * hash) + getVmIdentityToken().hashCode(); + hash = (37 * hash) + PROGRESS_FIELD_NUMBER; + hash = + (53 * hash) + + com.google.protobuf.Internal.hashLong( + 
java.lang.Double.doubleToLongBits(getProgress())); + if (hasStatus()) { + hash = (37 * hash) + STATUS_FIELD_NUMBER; + hash = (53 * hash) + getStatus().hashCode(); + } + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.admin.database.v1.InternalUpdateGraphOperationRequest parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.InternalUpdateGraphOperationRequest parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.InternalUpdateGraphOperationRequest parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.InternalUpdateGraphOperationRequest parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.InternalUpdateGraphOperationRequest parseFrom( + byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.InternalUpdateGraphOperationRequest parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.InternalUpdateGraphOperationRequest parseFrom( + java.io.InputStream 
input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.InternalUpdateGraphOperationRequest parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.InternalUpdateGraphOperationRequest + parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.InternalUpdateGraphOperationRequest + parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.InternalUpdateGraphOperationRequest parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.InternalUpdateGraphOperationRequest parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.admin.database.v1.InternalUpdateGraphOperationRequest prototype) { + 
return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Internal request proto, do not use directly.
+   * 
+ * + * Protobuf type {@code google.spanner.admin.database.v1.InternalUpdateGraphOperationRequest} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.admin.database.v1.InternalUpdateGraphOperationRequest) + com.google.spanner.admin.database.v1.InternalUpdateGraphOperationRequestOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_InternalUpdateGraphOperationRequest_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_InternalUpdateGraphOperationRequest_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.database.v1.InternalUpdateGraphOperationRequest.class, + com.google.spanner.admin.database.v1.InternalUpdateGraphOperationRequest.Builder + .class); + } + + // Construct using + // com.google.spanner.admin.database.v1.InternalUpdateGraphOperationRequest.newBuilder() + private Builder() { + maybeForceBuilderInitialization(); + } + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + maybeForceBuilderInitialization(); + } + + private void maybeForceBuilderInitialization() { + if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) { + getStatusFieldBuilder(); + } + } + + @java.lang.Override + public Builder clear() { + super.clear(); + bitField0_ = 0; + database_ = ""; + operationId_ = ""; + vmIdentityToken_ = ""; + progress_ = 0D; + status_ = null; + if (statusBuilder_ != null) { + statusBuilder_.dispose(); + statusBuilder_ = null; + } + return this; + } + + @java.lang.Override + 
public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_InternalUpdateGraphOperationRequest_descriptor; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.InternalUpdateGraphOperationRequest + getDefaultInstanceForType() { + return com.google.spanner.admin.database.v1.InternalUpdateGraphOperationRequest + .getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.InternalUpdateGraphOperationRequest build() { + com.google.spanner.admin.database.v1.InternalUpdateGraphOperationRequest result = + buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.InternalUpdateGraphOperationRequest buildPartial() { + com.google.spanner.admin.database.v1.InternalUpdateGraphOperationRequest result = + new com.google.spanner.admin.database.v1.InternalUpdateGraphOperationRequest(this); + if (bitField0_ != 0) { + buildPartial0(result); + } + onBuilt(); + return result; + } + + private void buildPartial0( + com.google.spanner.admin.database.v1.InternalUpdateGraphOperationRequest result) { + int from_bitField0_ = bitField0_; + if (((from_bitField0_ & 0x00000001) != 0)) { + result.database_ = database_; + } + if (((from_bitField0_ & 0x00000002) != 0)) { + result.operationId_ = operationId_; + } + if (((from_bitField0_ & 0x00000004) != 0)) { + result.vmIdentityToken_ = vmIdentityToken_; + } + if (((from_bitField0_ & 0x00000008) != 0)) { + result.progress_ = progress_; + } + int to_bitField0_ = 0; + if (((from_bitField0_ & 0x00000010) != 0)) { + result.status_ = statusBuilder_ == null ? 
status_ : statusBuilder_.build(); + to_bitField0_ |= 0x00000001; + } + result.bitField0_ |= to_bitField0_; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other + instanceof com.google.spanner.admin.database.v1.InternalUpdateGraphOperationRequest) { + return mergeFrom( + (com.google.spanner.admin.database.v1.InternalUpdateGraphOperationRequest) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.spanner.admin.database.v1.InternalUpdateGraphOperationRequest other) { + if (other + == com.google.spanner.admin.database.v1.InternalUpdateGraphOperationRequest + .getDefaultInstance()) return this; + if (!other.getDatabase().isEmpty()) { + database_ = other.database_; + bitField0_ |= 0x00000001; + onChanged(); + } + if (!other.getOperationId().isEmpty()) { + operationId_ = other.operationId_; + bitField0_ |= 0x00000002; + onChanged(); + } + if (!other.getVmIdentityToken().isEmpty()) { + vmIdentityToken_ = other.vmIdentityToken_; + 
bitField0_ |= 0x00000004; + onChanged(); + } + if (other.getProgress() != 0D) { + setProgress(other.getProgress()); + } + if (other.hasStatus()) { + mergeStatus(other.getStatus()); + } + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + case 10: + { + database_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000001; + break; + } // case 10 + case 18: + { + operationId_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000002; + break; + } // case 18 + case 25: + { + progress_ = input.readDouble(); + bitField0_ |= 0x00000008; + break; + } // case 25 + case 42: + { + vmIdentityToken_ = input.readStringRequireUtf8(); + bitField0_ |= 0x00000004; + break; + } // case 42 + case 50: + { + input.readMessage(getStatusFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000010; + break; + } // case 50 + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + private int bitField0_; + + private java.lang.Object database_ = ""; + + /** + * + * + *
+     * Internal field, do not use directly.
+     * 
+ * + * + * string database = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The database. + */ + public java.lang.String getDatabase() { + java.lang.Object ref = database_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + database_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Internal field, do not use directly.
+     * 
+ * + * + * string database = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for database. + */ + public com.google.protobuf.ByteString getDatabaseBytes() { + java.lang.Object ref = database_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + database_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Internal field, do not use directly.
+     * 
+ * + * + * string database = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The database to set. + * @return This builder for chaining. + */ + public Builder setDatabase(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + database_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + /** + * + * + *
+     * Internal field, do not use directly.
+     * 
+ * + * + * string database = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return This builder for chaining. + */ + public Builder clearDatabase() { + database_ = getDefaultInstance().getDatabase(); + bitField0_ = (bitField0_ & ~0x00000001); + onChanged(); + return this; + } + + /** + * + * + *
+     * Internal field, do not use directly.
+     * 
+ * + * + * string database = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @param value The bytes for database to set. + * @return This builder for chaining. + */ + public Builder setDatabaseBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + database_ = value; + bitField0_ |= 0x00000001; + onChanged(); + return this; + } + + private java.lang.Object operationId_ = ""; + + /** + * + * + *
+     * Internal field, do not use directly.
+     * 
+ * + * string operation_id = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The operationId. + */ + public java.lang.String getOperationId() { + java.lang.Object ref = operationId_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + operationId_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Internal field, do not use directly.
+     * 
+ * + * string operation_id = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for operationId. + */ + public com.google.protobuf.ByteString getOperationIdBytes() { + java.lang.Object ref = operationId_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + operationId_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Internal field, do not use directly.
+     * 
+ * + * string operation_id = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The operationId to set. + * @return This builder for chaining. + */ + public Builder setOperationId(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + operationId_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + /** + * + * + *
+     * Internal field, do not use directly.
+     * 
+ * + * string operation_id = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return This builder for chaining. + */ + public Builder clearOperationId() { + operationId_ = getDefaultInstance().getOperationId(); + bitField0_ = (bitField0_ & ~0x00000002); + onChanged(); + return this; + } + + /** + * + * + *
+     * Internal field, do not use directly.
+     * 
+ * + * string operation_id = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The bytes for operationId to set. + * @return This builder for chaining. + */ + public Builder setOperationIdBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + operationId_ = value; + bitField0_ |= 0x00000002; + onChanged(); + return this; + } + + private java.lang.Object vmIdentityToken_ = ""; + + /** + * + * + *
+     * Internal field, do not use directly.
+     * 
+ * + * string vm_identity_token = 5 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The vmIdentityToken. + */ + public java.lang.String getVmIdentityToken() { + java.lang.Object ref = vmIdentityToken_; + if (!(ref instanceof java.lang.String)) { + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; + java.lang.String s = bs.toStringUtf8(); + vmIdentityToken_ = s; + return s; + } else { + return (java.lang.String) ref; + } + } + + /** + * + * + *
+     * Internal field, do not use directly.
+     * 
+ * + * string vm_identity_token = 5 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for vmIdentityToken. + */ + public com.google.protobuf.ByteString getVmIdentityTokenBytes() { + java.lang.Object ref = vmIdentityToken_; + if (ref instanceof String) { + com.google.protobuf.ByteString b = + com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); + vmIdentityToken_ = b; + return b; + } else { + return (com.google.protobuf.ByteString) ref; + } + } + + /** + * + * + *
+     * Internal field, do not use directly.
+     * 
+ * + * string vm_identity_token = 5 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The vmIdentityToken to set. + * @return This builder for chaining. + */ + public Builder setVmIdentityToken(java.lang.String value) { + if (value == null) { + throw new NullPointerException(); + } + vmIdentityToken_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + /** + * + * + *
+     * Internal field, do not use directly.
+     * 
+ * + * string vm_identity_token = 5 [(.google.api.field_behavior) = REQUIRED]; + * + * @return This builder for chaining. + */ + public Builder clearVmIdentityToken() { + vmIdentityToken_ = getDefaultInstance().getVmIdentityToken(); + bitField0_ = (bitField0_ & ~0x00000004); + onChanged(); + return this; + } + + /** + * + * + *
+     * Internal field, do not use directly.
+     * 
+ * + * string vm_identity_token = 5 [(.google.api.field_behavior) = REQUIRED]; + * + * @param value The bytes for vmIdentityToken to set. + * @return This builder for chaining. + */ + public Builder setVmIdentityTokenBytes(com.google.protobuf.ByteString value) { + if (value == null) { + throw new NullPointerException(); + } + checkByteStringIsUtf8(value); + vmIdentityToken_ = value; + bitField0_ |= 0x00000004; + onChanged(); + return this; + } + + private double progress_; + + /** + * + * + *
+     * Internal field, do not use directly.
+     * 
+ * + * double progress = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The progress. + */ + @java.lang.Override + public double getProgress() { + return progress_; + } + + /** + * + * + *
+     * Internal field, do not use directly.
+     * 
+ * + * double progress = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @param value The progress to set. + * @return This builder for chaining. + */ + public Builder setProgress(double value) { + + progress_ = value; + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
+     * Internal field, do not use directly.
+     * 
+ * + * double progress = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return This builder for chaining. + */ + public Builder clearProgress() { + bitField0_ = (bitField0_ & ~0x00000008); + progress_ = 0D; + onChanged(); + return this; + } + + private com.google.rpc.Status status_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.rpc.Status, com.google.rpc.Status.Builder, com.google.rpc.StatusOrBuilder> + statusBuilder_; + + /** + * + * + *
+     * Internal field, do not use directly.
+     * 
+ * + * .google.rpc.Status status = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return Whether the status field is set. + */ + public boolean hasStatus() { + return ((bitField0_ & 0x00000010) != 0); + } + + /** + * + * + *
+     * Internal field, do not use directly.
+     * 
+ * + * .google.rpc.Status status = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The status. + */ + public com.google.rpc.Status getStatus() { + if (statusBuilder_ == null) { + return status_ == null ? com.google.rpc.Status.getDefaultInstance() : status_; + } else { + return statusBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * Internal field, do not use directly.
+     * 
+ * + * .google.rpc.Status status = 6 [(.google.api.field_behavior) = OPTIONAL]; + */ + public Builder setStatus(com.google.rpc.Status value) { + if (statusBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + status_ = value; + } else { + statusBuilder_.setMessage(value); + } + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + /** + * + * + *
+     * Internal field, do not use directly.
+     * 
+ * + * .google.rpc.Status status = 6 [(.google.api.field_behavior) = OPTIONAL]; + */ + public Builder setStatus(com.google.rpc.Status.Builder builderForValue) { + if (statusBuilder_ == null) { + status_ = builderForValue.build(); + } else { + statusBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000010; + onChanged(); + return this; + } + + /** + * + * + *
+     * Internal field, do not use directly.
+     * 
+ * + * .google.rpc.Status status = 6 [(.google.api.field_behavior) = OPTIONAL]; + */ + public Builder mergeStatus(com.google.rpc.Status value) { + if (statusBuilder_ == null) { + if (((bitField0_ & 0x00000010) != 0) + && status_ != null + && status_ != com.google.rpc.Status.getDefaultInstance()) { + getStatusBuilder().mergeFrom(value); + } else { + status_ = value; + } + } else { + statusBuilder_.mergeFrom(value); + } + if (status_ != null) { + bitField0_ |= 0x00000010; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * Internal field, do not use directly.
+     * 
+ * + * .google.rpc.Status status = 6 [(.google.api.field_behavior) = OPTIONAL]; + */ + public Builder clearStatus() { + bitField0_ = (bitField0_ & ~0x00000010); + status_ = null; + if (statusBuilder_ != null) { + statusBuilder_.dispose(); + statusBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * Internal field, do not use directly.
+     * 
+ * + * .google.rpc.Status status = 6 [(.google.api.field_behavior) = OPTIONAL]; + */ + public com.google.rpc.Status.Builder getStatusBuilder() { + bitField0_ |= 0x00000010; + onChanged(); + return getStatusFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * Internal field, do not use directly.
+     * 
+ * + * .google.rpc.Status status = 6 [(.google.api.field_behavior) = OPTIONAL]; + */ + public com.google.rpc.StatusOrBuilder getStatusOrBuilder() { + if (statusBuilder_ != null) { + return statusBuilder_.getMessageOrBuilder(); + } else { + return status_ == null ? com.google.rpc.Status.getDefaultInstance() : status_; + } + } + + /** + * + * + *
+     * Internal field, do not use directly.
+     * 
+ * + * .google.rpc.Status status = 6 [(.google.api.field_behavior) = OPTIONAL]; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.rpc.Status, com.google.rpc.Status.Builder, com.google.rpc.StatusOrBuilder> + getStatusFieldBuilder() { + if (statusBuilder_ == null) { + statusBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.rpc.Status, + com.google.rpc.Status.Builder, + com.google.rpc.StatusOrBuilder>(getStatus(), getParentForChildren(), isClean()); + status_ = null; + } + return statusBuilder_; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.spanner.admin.database.v1.InternalUpdateGraphOperationRequest) + } + + // @@protoc_insertion_point(class_scope:google.spanner.admin.database.v1.InternalUpdateGraphOperationRequest) + private static final com.google.spanner.admin.database.v1.InternalUpdateGraphOperationRequest + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = + new com.google.spanner.admin.database.v1.InternalUpdateGraphOperationRequest(); + } + + public static com.google.spanner.admin.database.v1.InternalUpdateGraphOperationRequest + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public InternalUpdateGraphOperationRequest parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch 
(com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.InternalUpdateGraphOperationRequest + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/InternalUpdateGraphOperationRequestOrBuilder.java b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/InternalUpdateGraphOperationRequestOrBuilder.java new file mode 100644 index 00000000000..babdcdfc074 --- /dev/null +++ b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/InternalUpdateGraphOperationRequestOrBuilder.java @@ -0,0 +1,158 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/spanner/admin/database/v1/spanner_database_admin.proto + +// Protobuf Java Version: 3.25.8 +package com.google.spanner.admin.database.v1; + +public interface InternalUpdateGraphOperationRequestOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.admin.database.v1.InternalUpdateGraphOperationRequest) + com.google.protobuf.MessageOrBuilder { + + /** + * + * + *
+   * Internal field, do not use directly.
+   * 
+ * + * + * string database = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The database. + */ + java.lang.String getDatabase(); + + /** + * + * + *
+   * Internal field, do not use directly.
+   * 
+ * + * + * string database = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } + * + * + * @return The bytes for database. + */ + com.google.protobuf.ByteString getDatabaseBytes(); + + /** + * + * + *
+   * Internal field, do not use directly.
+   * 
+ * + * string operation_id = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The operationId. + */ + java.lang.String getOperationId(); + + /** + * + * + *
+   * Internal field, do not use directly.
+   * 
+ * + * string operation_id = 2 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for operationId. + */ + com.google.protobuf.ByteString getOperationIdBytes(); + + /** + * + * + *
+   * Internal field, do not use directly.
+   * 
+ * + * string vm_identity_token = 5 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The vmIdentityToken. + */ + java.lang.String getVmIdentityToken(); + + /** + * + * + *
+   * Internal field, do not use directly.
+   * 
+ * + * string vm_identity_token = 5 [(.google.api.field_behavior) = REQUIRED]; + * + * @return The bytes for vmIdentityToken. + */ + com.google.protobuf.ByteString getVmIdentityTokenBytes(); + + /** + * + * + *
+   * Internal field, do not use directly.
+   * 
+ * + * double progress = 3 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The progress. + */ + double getProgress(); + + /** + * + * + *
+   * Internal field, do not use directly.
+   * 
+ * + * .google.rpc.Status status = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return Whether the status field is set. + */ + boolean hasStatus(); + + /** + * + * + *
+   * Internal field, do not use directly.
+   * 
+ * + * .google.rpc.Status status = 6 [(.google.api.field_behavior) = OPTIONAL]; + * + * @return The status. + */ + com.google.rpc.Status getStatus(); + + /** + * + * + *
+   * Internal field, do not use directly.
+   * 
+ * + * .google.rpc.Status status = 6 [(.google.api.field_behavior) = OPTIONAL]; + */ + com.google.rpc.StatusOrBuilder getStatusOrBuilder(); +} diff --git a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/InternalUpdateGraphOperationResponse.java b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/InternalUpdateGraphOperationResponse.java new file mode 100644 index 00000000000..50020565303 --- /dev/null +++ b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/InternalUpdateGraphOperationResponse.java @@ -0,0 +1,454 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/spanner/admin/database/v1/spanner_database_admin.proto + +// Protobuf Java Version: 3.25.8 +package com.google.spanner.admin.database.v1; + +/** + * + * + *
+ * Internal response proto, do not use directly.
+ * 
+ * + * Protobuf type {@code google.spanner.admin.database.v1.InternalUpdateGraphOperationResponse} + */ +public final class InternalUpdateGraphOperationResponse + extends com.google.protobuf.GeneratedMessageV3 + implements + // @@protoc_insertion_point(message_implements:google.spanner.admin.database.v1.InternalUpdateGraphOperationResponse) + InternalUpdateGraphOperationResponseOrBuilder { + private static final long serialVersionUID = 0L; + + // Use InternalUpdateGraphOperationResponse.newBuilder() to construct. + private InternalUpdateGraphOperationResponse( + com.google.protobuf.GeneratedMessageV3.Builder builder) { + super(builder); + } + + private InternalUpdateGraphOperationResponse() {} + + @java.lang.Override + @SuppressWarnings({"unused"}) + protected java.lang.Object newInstance(UnusedPrivateParameter unused) { + return new InternalUpdateGraphOperationResponse(); + } + + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_InternalUpdateGraphOperationResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_InternalUpdateGraphOperationResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.database.v1.InternalUpdateGraphOperationResponse.class, + com.google.spanner.admin.database.v1.InternalUpdateGraphOperationResponse.Builder + .class); + } + + private byte memoizedIsInitialized = -1; + + @java.lang.Override + public final boolean isInitialized() { + byte isInitialized = memoizedIsInitialized; + if (isInitialized == 1) return true; + if (isInitialized == 0) return false; + + memoizedIsInitialized = 1; + return true; + } + + @java.lang.Override + public 
void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { + getUnknownFields().writeTo(output); + } + + @java.lang.Override + public int getSerializedSize() { + int size = memoizedSize; + if (size != -1) return size; + + size = 0; + size += getUnknownFields().getSerializedSize(); + memoizedSize = size; + return size; + } + + @java.lang.Override + public boolean equals(final java.lang.Object obj) { + if (obj == this) { + return true; + } + if (!(obj + instanceof com.google.spanner.admin.database.v1.InternalUpdateGraphOperationResponse)) { + return super.equals(obj); + } + com.google.spanner.admin.database.v1.InternalUpdateGraphOperationResponse other = + (com.google.spanner.admin.database.v1.InternalUpdateGraphOperationResponse) obj; + + if (!getUnknownFields().equals(other.getUnknownFields())) return false; + return true; + } + + @java.lang.Override + public int hashCode() { + if (memoizedHashCode != 0) { + return memoizedHashCode; + } + int hash = 41; + hash = (19 * hash) + getDescriptor().hashCode(); + hash = (29 * hash) + getUnknownFields().hashCode(); + memoizedHashCode = hash; + return hash; + } + + public static com.google.spanner.admin.database.v1.InternalUpdateGraphOperationResponse parseFrom( + java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.InternalUpdateGraphOperationResponse parseFrom( + java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.InternalUpdateGraphOperationResponse parseFrom( + com.google.protobuf.ByteString data) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static 
com.google.spanner.admin.database.v1.InternalUpdateGraphOperationResponse parseFrom( + com.google.protobuf.ByteString data, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.InternalUpdateGraphOperationResponse parseFrom( + byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data); + } + + public static com.google.spanner.admin.database.v1.InternalUpdateGraphOperationResponse parseFrom( + byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + return PARSER.parseFrom(data, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.InternalUpdateGraphOperationResponse parseFrom( + java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.InternalUpdateGraphOperationResponse parseFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.InternalUpdateGraphOperationResponse + parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.InternalUpdateGraphOperationResponse + parseDelimitedFrom( + java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( + PARSER, input, 
extensionRegistry); + } + + public static com.google.spanner.admin.database.v1.InternalUpdateGraphOperationResponse parseFrom( + com.google.protobuf.CodedInputStream input) throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); + } + + public static com.google.spanner.admin.database.v1.InternalUpdateGraphOperationResponse parseFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + return com.google.protobuf.GeneratedMessageV3.parseWithIOException( + PARSER, input, extensionRegistry); + } + + @java.lang.Override + public Builder newBuilderForType() { + return newBuilder(); + } + + public static Builder newBuilder() { + return DEFAULT_INSTANCE.toBuilder(); + } + + public static Builder newBuilder( + com.google.spanner.admin.database.v1.InternalUpdateGraphOperationResponse prototype) { + return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); + } + + @java.lang.Override + public Builder toBuilder() { + return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); + } + + @java.lang.Override + protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + Builder builder = new Builder(parent); + return builder; + } + + /** + * + * + *
+   * Internal response proto, do not use directly.
+   * 
+ * + * Protobuf type {@code google.spanner.admin.database.v1.InternalUpdateGraphOperationResponse} + */ + public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder + implements + // @@protoc_insertion_point(builder_implements:google.spanner.admin.database.v1.InternalUpdateGraphOperationResponse) + com.google.spanner.admin.database.v1.InternalUpdateGraphOperationResponseOrBuilder { + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_InternalUpdateGraphOperationResponse_descriptor; + } + + @java.lang.Override + protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internalGetFieldAccessorTable() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_InternalUpdateGraphOperationResponse_fieldAccessorTable + .ensureFieldAccessorsInitialized( + com.google.spanner.admin.database.v1.InternalUpdateGraphOperationResponse.class, + com.google.spanner.admin.database.v1.InternalUpdateGraphOperationResponse.Builder + .class); + } + + // Construct using + // com.google.spanner.admin.database.v1.InternalUpdateGraphOperationResponse.newBuilder() + private Builder() {} + + private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { + super(parent); + } + + @java.lang.Override + public Builder clear() { + super.clear(); + return this; + } + + @java.lang.Override + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { + return com.google.spanner.admin.database.v1.SpannerDatabaseAdminProto + .internal_static_google_spanner_admin_database_v1_InternalUpdateGraphOperationResponse_descriptor; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.InternalUpdateGraphOperationResponse + getDefaultInstanceForType() { + return 
com.google.spanner.admin.database.v1.InternalUpdateGraphOperationResponse + .getDefaultInstance(); + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.InternalUpdateGraphOperationResponse build() { + com.google.spanner.admin.database.v1.InternalUpdateGraphOperationResponse result = + buildPartial(); + if (!result.isInitialized()) { + throw newUninitializedMessageException(result); + } + return result; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.InternalUpdateGraphOperationResponse + buildPartial() { + com.google.spanner.admin.database.v1.InternalUpdateGraphOperationResponse result = + new com.google.spanner.admin.database.v1.InternalUpdateGraphOperationResponse(this); + onBuilt(); + return result; + } + + @java.lang.Override + public Builder clone() { + return super.clone(); + } + + @java.lang.Override + public Builder setField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.setField(field, value); + } + + @java.lang.Override + public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { + return super.clearField(field); + } + + @java.lang.Override + public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { + return super.clearOneof(oneof); + } + + @java.lang.Override + public Builder setRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, int index, java.lang.Object value) { + return super.setRepeatedField(field, index, value); + } + + @java.lang.Override + public Builder addRepeatedField( + com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { + return super.addRepeatedField(field, value); + } + + @java.lang.Override + public Builder mergeFrom(com.google.protobuf.Message other) { + if (other + instanceof com.google.spanner.admin.database.v1.InternalUpdateGraphOperationResponse) { + return mergeFrom( + 
(com.google.spanner.admin.database.v1.InternalUpdateGraphOperationResponse) other); + } else { + super.mergeFrom(other); + return this; + } + } + + public Builder mergeFrom( + com.google.spanner.admin.database.v1.InternalUpdateGraphOperationResponse other) { + if (other + == com.google.spanner.admin.database.v1.InternalUpdateGraphOperationResponse + .getDefaultInstance()) return this; + this.mergeUnknownFields(other.getUnknownFields()); + onChanged(); + return this; + } + + @java.lang.Override + public final boolean isInitialized() { + return true; + } + + @java.lang.Override + public Builder mergeFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws java.io.IOException { + if (extensionRegistry == null) { + throw new java.lang.NullPointerException(); + } + try { + boolean done = false; + while (!done) { + int tag = input.readTag(); + switch (tag) { + case 0: + done = true; + break; + default: + { + if (!super.parseUnknownField(input, extensionRegistry, tag)) { + done = true; // was an endgroup tag + } + break; + } // default: + } // switch (tag) + } // while (!done) + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.unwrapIOException(); + } finally { + onChanged(); + } // finally + return this; + } + + @java.lang.Override + public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.setUnknownFields(unknownFields); + } + + @java.lang.Override + public final Builder mergeUnknownFields( + final com.google.protobuf.UnknownFieldSet unknownFields) { + return super.mergeUnknownFields(unknownFields); + } + + // @@protoc_insertion_point(builder_scope:google.spanner.admin.database.v1.InternalUpdateGraphOperationResponse) + } + + // @@protoc_insertion_point(class_scope:google.spanner.admin.database.v1.InternalUpdateGraphOperationResponse) + private static final 
com.google.spanner.admin.database.v1.InternalUpdateGraphOperationResponse + DEFAULT_INSTANCE; + + static { + DEFAULT_INSTANCE = + new com.google.spanner.admin.database.v1.InternalUpdateGraphOperationResponse(); + } + + public static com.google.spanner.admin.database.v1.InternalUpdateGraphOperationResponse + getDefaultInstance() { + return DEFAULT_INSTANCE; + } + + private static final com.google.protobuf.Parser PARSER = + new com.google.protobuf.AbstractParser() { + @java.lang.Override + public InternalUpdateGraphOperationResponse parsePartialFrom( + com.google.protobuf.CodedInputStream input, + com.google.protobuf.ExtensionRegistryLite extensionRegistry) + throws com.google.protobuf.InvalidProtocolBufferException { + Builder builder = newBuilder(); + try { + builder.mergeFrom(input, extensionRegistry); + } catch (com.google.protobuf.InvalidProtocolBufferException e) { + throw e.setUnfinishedMessage(builder.buildPartial()); + } catch (com.google.protobuf.UninitializedMessageException e) { + throw e.asInvalidProtocolBufferException().setUnfinishedMessage(builder.buildPartial()); + } catch (java.io.IOException e) { + throw new com.google.protobuf.InvalidProtocolBufferException(e) + .setUnfinishedMessage(builder.buildPartial()); + } + return builder.buildPartial(); + } + }; + + public static com.google.protobuf.Parser parser() { + return PARSER; + } + + @java.lang.Override + public com.google.protobuf.Parser getParserForType() { + return PARSER; + } + + @java.lang.Override + public com.google.spanner.admin.database.v1.InternalUpdateGraphOperationResponse + getDefaultInstanceForType() { + return DEFAULT_INSTANCE; + } +} diff --git a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/InternalUpdateGraphOperationResponseOrBuilder.java b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/InternalUpdateGraphOperationResponseOrBuilder.java new file mode 100644 index 
00000000000..7f5e74fca40 --- /dev/null +++ b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/InternalUpdateGraphOperationResponseOrBuilder.java @@ -0,0 +1,25 @@ +/* + * Copyright 2025 Google LLC + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * https://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +// Generated by the protocol buffer compiler. DO NOT EDIT! +// source: google/spanner/admin/database/v1/spanner_database_admin.proto + +// Protobuf Java Version: 3.25.8 +package com.google.spanner.admin.database.v1; + +public interface InternalUpdateGraphOperationResponseOrBuilder + extends + // @@protoc_insertion_point(interface_extends:google.spanner.admin.database.v1.InternalUpdateGraphOperationResponse) + com.google.protobuf.MessageOrBuilder {} diff --git a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/SpannerDatabaseAdminProto.java b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/SpannerDatabaseAdminProto.java index 520c47553ae..3ede3d5a3fe 100644 --- a/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/SpannerDatabaseAdminProto.java +++ b/proto-google-cloud-spanner-admin-database-v1/src/main/java/com/google/spanner/admin/database/v1/SpannerDatabaseAdminProto.java @@ -140,6 +140,14 @@ public static void registerAllExtensions(com.google.protobuf.ExtensionRegistry r 
internal_static_google_spanner_admin_database_v1_SplitPoints_Key_descriptor; static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable internal_static_google_spanner_admin_database_v1_SplitPoints_Key_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_admin_database_v1_InternalUpdateGraphOperationRequest_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_spanner_admin_database_v1_InternalUpdateGraphOperationRequest_fieldAccessorTable; + static final com.google.protobuf.Descriptors.Descriptor + internal_static_google_spanner_admin_database_v1_InternalUpdateGraphOperationResponse_descriptor; + static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable + internal_static_google_spanner_admin_database_v1_InternalUpdateGraphOperationResponse_fieldAccessorTable; public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { return descriptor; @@ -149,332 +157,337 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { static { java.lang.String[] descriptorData = { - "\n=google/spanner/admin/database/v1/spann" - + "er_database_admin.proto\022 google.spanner." + "\n" + + "=google/spanner/admin/database/v1/spanner_database_admin.proto\022 google.spanner." 
+ "admin.database.v1\032\034google/api/annotation" + "s.proto\032\027google/api/client.proto\032\037google" + "/api/field_behavior.proto\032\031google/api/re" + "source.proto\032\036google/iam/v1/iam_policy.p" + "roto\032\032google/iam/v1/policy.proto\032#google" - + "/longrunning/operations.proto\032\033google/pr" - + "otobuf/empty.proto\032 google/protobuf/fiel" - + "d_mask.proto\032\034google/protobuf/struct.pro" - + "to\032\037google/protobuf/timestamp.proto\032-goo" - + "gle/spanner/admin/database/v1/backup.pro" - + "to\0326google/spanner/admin/database/v1/bac" - + "kup_schedule.proto\032-google/spanner/admin" - + "/database/v1/common.proto\"\253\001\n\013RestoreInf" - + "o\022H\n\013source_type\030\001 \001(\01623.google.spanner." - + "admin.database.v1.RestoreSourceType\022C\n\013b" - + "ackup_info\030\002 \001(\0132,.google.spanner.admin." - + "database.v1.BackupInfoH\000B\r\n\013source_info\"" - + "\312\006\n\010Database\022\021\n\004name\030\001 \001(\tB\003\340A\002\022D\n\005state" - + "\030\002 \001(\01620.google.spanner.admin.database.v" - + "1.Database.StateB\003\340A\003\0224\n\013create_time\030\003 \001" - + "(\0132\032.google.protobuf.TimestampB\003\340A\003\022H\n\014r" - + "estore_info\030\004 \001(\0132-.google.spanner.admin" - + ".database.v1.RestoreInfoB\003\340A\003\022R\n\021encrypt" - + "ion_config\030\005 \001(\01322.google.spanner.admin." 
- + "database.v1.EncryptionConfigB\003\340A\003\022N\n\017enc" - + "ryption_info\030\010 \003(\01320.google.spanner.admi" - + "n.database.v1.EncryptionInfoB\003\340A\003\022%\n\030ver" - + "sion_retention_period\030\006 \001(\tB\003\340A\003\022>\n\025earl" - + "iest_version_time\030\007 \001(\0132\032.google.protobu" - + "f.TimestampB\003\340A\003\022\033\n\016default_leader\030\t \001(\t" - + "B\003\340A\003\022P\n\020database_dialect\030\n \001(\01621.google" - + ".spanner.admin.database.v1.DatabaseDiale" - + "ctB\003\340A\003\022\036\n\026enable_drop_protection\030\013 \001(\010\022" - + "\030\n\013reconciling\030\014 \001(\010B\003\340A\003\"M\n\005State\022\025\n\021ST" - + "ATE_UNSPECIFIED\020\000\022\014\n\010CREATING\020\001\022\t\n\005READY" - + "\020\002\022\024\n\020READY_OPTIMIZING\020\003:b\352A_\n\037spanner.g" - + "oogleapis.com/Database\022\332A\006parent\202\323\344\223\002/\022-" - + "/v1/{parent=projects/*/instances/*}/data" - + "bases\022\244\002\n\016CreateDatabase\0227.google.spanne" - + "r.admin.database.v1.CreateDatabaseReques" - + "t\032\035.google.longrunning.Operation\"\271\001\312Ad\n)" - + "google.spanner.admin.database.v1.Databas" - + "e\0227google.spanner.admin.database.v1.Crea" - + "teDatabaseMetadata\332A\027parent,create_state" - + "ment\202\323\344\223\0022\"-/v1/{parent=projects/*/insta" - + "nces/*}/databases:\001*\022\255\001\n\013GetDatabase\0224.g" - + "oogle.spanner.admin.database.v1.GetDatab" - + "aseRequest\032*.google.spanner.admin.databa" - + "se.v1.Database\"<\332A\004name\202\323\344\223\002/\022-/v1/{name" - + "=projects/*/instances/*/databases/*}\022\357\001\n" - + "\016UpdateDatabase\0227.google.spanner.admin.d" - + "atabase.v1.UpdateDatabaseRequest\032\035.googl" - + "e.longrunning.Operation\"\204\001\312A\"\n\010Database\022" - + "\026UpdateDatabaseMetadata\332A\024database,updat" - + "e_mask\202\323\344\223\002B26/v1/{database.name=project" - + "s/*/instances/*/databases/*}:\010database\022\235" - + 
"\002\n\021UpdateDatabaseDdl\022:.google.spanner.ad" - + "min.database.v1.UpdateDatabaseDdlRequest" - + "\032\035.google.longrunning.Operation\"\254\001\312AS\n\025g" - + "oogle.protobuf.Empty\022:google.spanner.adm" - + "in.database.v1.UpdateDatabaseDdlMetadata" - + "\332A\023database,statements\202\323\344\223\002:25/v1/{datab" - + "ase=projects/*/instances/*/databases/*}/" - + "ddl:\001*\022\243\001\n\014DropDatabase\0225.google.spanner" - + ".admin.database.v1.DropDatabaseRequest\032\026" - + ".google.protobuf.Empty\"D\332A\010database\202\323\344\223\002" - + "3*1/v1/{database=projects/*/instances/*/" - + "databases/*}\022\315\001\n\016GetDatabaseDdl\0227.google" - + ".spanner.admin.database.v1.GetDatabaseDd" - + "lRequest\0328.google.spanner.admin.database" - + ".v1.GetDatabaseDdlResponse\"H\332A\010database\202" - + "\323\344\223\0027\0225/v1/{database=projects/*/instance" - + "s/*/databases/*}/ddl\022\302\002\n\014SetIamPolicy\022\"." - + "google.iam.v1.SetIamPolicyRequest\032\025.goog" - + "le.iam.v1.Policy\"\366\001\332A\017resource,policy\202\323\344" - + "\223\002\335\001\">/v1/{resource=projects/*/instances" - + "/*/databases/*}:setIamPolicy:\001*ZA\"/v1/{resource=projects/*/instances/*" - + "/databases/*}:getIamPolicy:\001*ZA\".google.sp" - + "anner.admin.database.v1.ListBackupOperat" - + "ionsResponse\"E\332A\006parent\202\323\344\223\0026\0224/v1/{pare" - + "nt=projects/*/instances/*}/backupOperati" - + "ons\022\334\001\n\021ListDatabaseRoles\022:.google.spann" - + "er.admin.database.v1.ListDatabaseRolesRe" - + "quest\032;.google.spanner.admin.database.v1" - + ".ListDatabaseRolesResponse\"N\332A\006parent\202\323\344" - + "\223\002?\022=/v1/{parent=projects/*/instances/*/" - + "databases/*}/databaseRoles\022\350\001\n\016AddSplitP" - + "oints\0227.google.spanner.admin.database.v1" - + ".AddSplitPointsRequest\0328.google.spanner." 
- + "admin.database.v1.AddSplitPointsResponse" - + "\"c\332A\025database,split_points\202\323\344\223\002E\"@/v1/{d" - + "atabase=projects/*/instances/*/databases" - + "/*}:addSplitPoints:\001*\022\216\002\n\024CreateBackupSc" - + "hedule\022=.google.spanner.admin.database.v" - + "1.CreateBackupScheduleRequest\0320.google.s" - + "panner.admin.database.v1.BackupSchedule\"" - + "\204\001\332A)parent,backup_schedule,backup_sched" - + "ule_id\202\323\344\223\002R\"?/v1/{parent=projects/*/ins" - + "tances/*/databases/*}/backupSchedules:\017b" - + "ackup_schedule\022\321\001\n\021GetBackupSchedule\022:.g" - + "oogle.spanner.admin.database.v1.GetBacku" - + "pScheduleRequest\0320.google.spanner.admin." - + "database.v1.BackupSchedule\"N\332A\004name\202\323\344\223\002" - + "A\022?/v1/{name=projects/*/instances/*/data" - + "bases/*/backupSchedules/*}\022\220\002\n\024UpdateBac" - + "kupSchedule\022=.google.spanner.admin.datab" - + "ase.v1.UpdateBackupScheduleRequest\0320.goo" - + "gle.spanner.admin.database.v1.BackupSche" - + "dule\"\206\001\332A\033backup_schedule,update_mask\202\323\344" - + "\223\002b2O/v1/{backup_schedule.name=projects/" - + "*/instances/*/databases/*/backupSchedule" - + "s/*}:\017backup_schedule\022\275\001\n\024DeleteBackupSc" - + "hedule\022=.google.spanner.admin.database.v" - + "1.DeleteBackupScheduleRequest\032\026.google.p" - + "rotobuf.Empty\"N\332A\004name\202\323\344\223\002A*?/v1/{name=" - + "projects/*/instances/*/databases/*/backu" - + "pSchedules/*}\022\344\001\n\023ListBackupSchedules\022<." 
- + "google.spanner.admin.database.v1.ListBac" - + "kupSchedulesRequest\032=.google.spanner.adm" - + "in.database.v1.ListBackupSchedulesRespon" - + "se\"P\332A\006parent\202\323\344\223\002A\022?/v1/{parent=project" - + "s/*/instances/*/databases/*}/backupSched" - + "ules\032x\312A\026spanner.googleapis.com\322A\\https:" - + "//www.googleapis.com/auth/cloud-platform" - + ",https://www.googleapis.com/auth/spanner" - + ".adminB\326\003\n$com.google.spanner.admin.data" - + "base.v1B\031SpannerDatabaseAdminProtoP\001ZFcl" - + "oud.google.com/go/spanner/admin/database" - + "/apiv1/databasepb;databasepb\252\002&Google.Cl" - + "oud.Spanner.Admin.Database.V1\312\002&Google\\C" - + "loud\\Spanner\\Admin\\Database\\V1\352\002+Google:" - + ":Cloud::Spanner::Admin::Database::V1\352AJ\n" - + "\037spanner.googleapis.com/Instance\022\'projec" - + "ts/{project}/instances/{instance}\352A{\n(sp" - + "anner.googleapis.com/InstancePartition\022O" - + "projects/{project}/instances/{instance}/" - + "instancePartitions/{instance_partition}b" - + "\006proto3" + + "/longrunning/operations.proto\032\033google/protobuf/empty.proto\032" + + " google/protobuf/field_mask.proto\032\034google/protobuf/struct.pro" + + "to\032\037google/protobuf/timestamp.proto\032\027goo" + + "gle/rpc/status.proto\032-google/spanner/admin/database/v1/backup.proto\0326google/span" + + "ner/admin/database/v1/backup_schedule.pr" + + "oto\032-google/spanner/admin/database/v1/common.proto\"\253\001\n" + + "\013RestoreInfo\022H\n" + + "\013source_type\030\001" + + " \001(\01623.google.spanner.admin.database.v1.RestoreSourceType\022C\n" + + "\013backup_info\030\002 \001(" + + "\0132,.google.spanner.admin.database.v1.BackupInfoH\000B\r\n" + + "\013source_info\"\312\006\n" + + "\010Database\022\021\n" + + "\004name\030\001 \001(\tB\003\340A\002\022D\n" + + "\005state\030\002 \001(\01620.google" + + ".spanner.admin.database.v1.Database.StateB\003\340A\003\0224\n" + + "\013create_time\030\003 
\001(\0132\032.google.protobuf.TimestampB\003\340A\003\022H\n" + + "\014restore_info\030\004 \001" + + "(\0132-.google.spanner.admin.database.v1.RestoreInfoB\003\340A\003\022R\n" + + "\021encryption_config\030\005 \001(" + + "\01322.google.spanner.admin.database.v1.EncryptionConfigB\003\340A\003\022N\n" + + "\017encryption_info\030\010 " + + "\003(\01320.google.spanner.admin.database.v1.EncryptionInfoB\003\340A\003\022%\n" + + "\030version_retention_period\030\006 \001(\tB\003\340A\003\022>\n" + + "\025earliest_version_time\030\007" + + " \001(\0132\032.google.protobuf.TimestampB\003\340A\003\022\033\n" + + "\016default_leader\030\t \001(\tB\003\340A\003\022P\n" + + "\020database_dialect\030\n" + + " \001(\01621.google.spanner.admin.database.v1.DatabaseDialectB\003\340A\003\022\036\n" + + "\026enable_drop_protection\030\013 \001(\010\022\030\n" + + "\013reconciling\030\014 \001(\010B\003\340A\003\"M\n" + + "\005State\022\025\n" + + "\021STATE_UNSPECIFIED\020\000\022\014\n" + + "\010CREATING\020\001\022\t\n" + + "\005READY\020\002\022\024\n" + + "\020READY_OPTIMIZING\020\003:b\352A_\n" + + "\037spanner.googleapis.com/D" + + "atabase\022\332A\006parent\202" + + "\323\344\223\002/\022-/v1/{parent=projects/*/instances/*}/databases\022\244\002\n" + + "\016CreateDatabase\0227.google.spanner.admin.database.v1.CreateDatabas" + + "eRequest\032\035.google.longrunning.Operation\"\271\001\312Ad\n" + + ")google.spanner.admin.database.v1.Database\0227google.spanner.admin.database." + + "v1.CreateDatabaseMetadata\332A\027parent,creat" + + "e_statement\202\323\344\223\0022\"-/v1/{parent=projects/*/instances/*}/databases:\001*\022\255\001\n" + + "\013GetDatabase\0224.google.spanner.admin.database.v1.G" + + "etDatabaseRequest\032*.google.spanner.admin" + + ".database.v1.Database\"<\332A\004name\202\323\344\223\002/\022-/v" + + "1/{name=projects/*/instances/*/databases/*}\022\357\001\n" + + "\016UpdateDatabase\0227.google.spanner." 
+ + "admin.database.v1.UpdateDatabaseRequest\032\035.google.longrunning.Operation\"\204\001\312A\"\n" + + "\010Database\022\026UpdateDatabaseMetadata\332A\024databas" + + "e,update_mask\202\323\344\223\002B26/v1/{database.name=" + + "projects/*/instances/*/databases/*}:\010database\022\235\002\n" + + "\021UpdateDatabaseDdl\022:.google.spanner.admin.database.v1.UpdateDatabaseDdl" + + "Request\032\035.google.longrunning.Operation\"\254\001\312AS\n" + + "\025google.protobuf.Empty\022:google.spanner.admin.database.v1.UpdateDatabaseDdlM" + + "etadata\332A\023database,statements\202\323\344\223\002:25/v1" + + "/{database=projects/*/instances/*/databases/*}/ddl:\001*\022\243\001\n" + + "\014DropDatabase\0225.google.spanner.admin.database.v1.DropDatabaseRe" + + "quest\032\026.google.protobuf.Empty\"D\332A\010databa" + + "se\202\323\344\223\0023*1/v1/{database=projects/*/instances/*/databases/*}\022\315\001\n" + + "\016GetDatabaseDdl\0227.google.spanner.admin.database.v1.GetDat" + + "abaseDdlRequest\0328.google.spanner.admin.d" + + "atabase.v1.GetDatabaseDdlResponse\"H\332A\010da" + + "tabase\202\323\344\223\0027\0225/v1/{database=projects/*/instances/*/databases/*}/ddl\022\302\002\n" + + "\014SetIamPolicy\022\".google.iam.v1.SetIamPolicyRequest" + + "\032\025.google.iam.v1.Policy\"\366\001\332A\017resource,po" + + "licy\202\323\344\223\002\335\001\">/v1/{resource=projects/*/in" + + "stances/*/databases/*}:setIamPolicy:\001*ZA\"/v1/{resource=projects/*/inst" + + "ances/*/databases/*}:getIamPolicy:\001*ZA\".google.spanner.admin.database.v1.ListBacku" + + "pOperationsResponse\"E\332A\006parent\202\323\344\223\0026\0224/v" + + "1/{parent=projects/*/instances/*}/backupOperations\022\334\001\n" + + "\021ListDatabaseRoles\022:.google.spanner.admin.database.v1.ListDatabase" + + "RolesRequest\032;.google.spanner.admin.data" + + "base.v1.ListDatabaseRolesResponse\"N\332A\006pa" + + 
"rent\202\323\344\223\002?\022=/v1/{parent=projects/*/instances/*/databases/*}/databaseRoles\022\350\001\n" + + "\016AddSplitPoints\0227.google.spanner.admin.data" + + "base.v1.AddSplitPointsRequest\0328.google.spanner.admin.database.v1.AddSplitPointsR" + + "esponse\"c\332A\025database,split_points\202\323\344\223\002E\"" + + "@/v1/{database=projects/*/instances/*/databases/*}:addSplitPoints:\001*\022\216\002\n" + + "\024CreateBackupSchedule\022=.google.spanner.admin.dat" + + "abase.v1.CreateBackupScheduleRequest\0320.google.spanner.admin.database.v1.BackupSc" + + "hedule\"\204\001\332A)parent,backup_schedule,backu" + + "p_schedule_id\202\323\344\223\002R\"?/v1/{parent=project" + + "s/*/instances/*/databases/*}/backupSchedules:\017backup_schedule\022\321\001\n" + + "\021GetBackupSchedule\022:.google.spanner.admin.database.v1.G" + + "etBackupScheduleRequest\0320.google.spanner" + + ".admin.database.v1.BackupSchedule\"N\332A\004na" + + "me\202\323\344\223\002A\022?/v1/{name=projects/*/instances/*/databases/*/backupSchedules/*}\022\220\002\n" + + "\024UpdateBackupSchedule\022=.google.spanner.admi" + + "n.database.v1.UpdateBackupScheduleRequest\0320.google.spanner.admin.database.v1.Bac" + + "kupSchedule\"\206\001\332A\033backup_schedule,update_" + + "mask\202\323\344\223\002b2O/v1/{backup_schedule.name=pr" + + "ojects/*/instances/*/databases/*/backupSchedules/*}:\017backup_schedule\022\275\001\n" + + "\024DeleteBackupSchedule\022=.google.spanner.admin.dat" + + "abase.v1.DeleteBackupScheduleRequest\032\026.g" + + "oogle.protobuf.Empty\"N\332A\004name\202\323\344\223\002A*?/v1" + + "/{name=projects/*/instances/*/databases/*/backupSchedules/*}\022\344\001\n" + + "\023ListBackupSchedules\022<.google.spanner.admin.database.v1." 
+ + "ListBackupSchedulesRequest\032=.google.spanner.admin.database.v1.ListBackupSchedule" + + "sResponse\"P\332A\006parent\202\323\344\223\002A\022?/v1/{parent=" + + "projects/*/instances/*/databases/*}/backupSchedules\022\307\001\n" + + "\034InternalUpdateGraphOperation\022E.google.spanner.admin.database.v1." + + "InternalUpdateGraphOperationRequest\032F.google.spanner.admin.database.v1.InternalU" + + "pdateGraphOperationResponse\"\030\332A\025database" + + ",operation_id\032x\312A\026spanner.googleapis.com" + + "\322A\\https://www.googleapis.com/auth/cloud" + + "-platform,https://www.googleapis.com/auth/spanner.adminB\326\003\n" + + "$com.google.spanner.admin.database.v1B\031SpannerDatabaseAdminPr" + + "otoP\001ZFcloud.google.com/go/spanner/admin" + + "/database/apiv1/databasepb;databasepb\252\002&" + + "Google.Cloud.Spanner.Admin.Database.V1\312\002" + + "&Google\\Cloud\\Spanner\\Admin\\Database\\V1\352" + + "\002+Google::Cloud::Spanner::Admin::Database::V1\352AJ\n" + + "\037spanner.googleapis.com/Instanc" + + "e\022\'projects/{project}/instances/{instance}\352A{\n" + + "(spanner.googleapis.com/InstancePartition\022Oprojects/{project}/instances/{i" + + "nstance}/instancePartitions/{instance_partition}b\006proto3" }; descriptor = com.google.protobuf.Descriptors.FileDescriptor.internalBuildGeneratedFileFrom( @@ -491,6 +504,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { com.google.protobuf.FieldMaskProto.getDescriptor(), com.google.protobuf.StructProto.getDescriptor(), com.google.protobuf.TimestampProto.getDescriptor(), + com.google.rpc.StatusProto.getDescriptor(), com.google.spanner.admin.database.v1.BackupProto.getDescriptor(), com.google.spanner.admin.database.v1.BackupScheduleProto.getDescriptor(), com.google.spanner.admin.database.v1.CommonProto.getDescriptor(), @@ -741,6 +755,20 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { new java.lang.String[] { "KeyParts", }); + 
internal_static_google_spanner_admin_database_v1_InternalUpdateGraphOperationRequest_descriptor = + getDescriptor().getMessageTypes().get(27); + internal_static_google_spanner_admin_database_v1_InternalUpdateGraphOperationRequest_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_spanner_admin_database_v1_InternalUpdateGraphOperationRequest_descriptor, + new java.lang.String[] { + "Database", "OperationId", "VmIdentityToken", "Progress", "Status", + }); + internal_static_google_spanner_admin_database_v1_InternalUpdateGraphOperationResponse_descriptor = + getDescriptor().getMessageTypes().get(28); + internal_static_google_spanner_admin_database_v1_InternalUpdateGraphOperationResponse_fieldAccessorTable = + new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( + internal_static_google_spanner_admin_database_v1_InternalUpdateGraphOperationResponse_descriptor, + new java.lang.String[] {}); com.google.protobuf.ExtensionRegistry registry = com.google.protobuf.ExtensionRegistry.newInstance(); registry.add(com.google.api.ClientProto.defaultHost); @@ -765,6 +793,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { com.google.protobuf.FieldMaskProto.getDescriptor(); com.google.protobuf.StructProto.getDescriptor(); com.google.protobuf.TimestampProto.getDescriptor(); + com.google.rpc.StatusProto.getDescriptor(); com.google.spanner.admin.database.v1.BackupProto.getDescriptor(); com.google.spanner.admin.database.v1.BackupScheduleProto.getDescriptor(); com.google.spanner.admin.database.v1.CommonProto.getDescriptor(); diff --git a/proto-google-cloud-spanner-admin-database-v1/src/main/proto/google/spanner/admin/database/v1/spanner_database_admin.proto b/proto-google-cloud-spanner-admin-database-v1/src/main/proto/google/spanner/admin/database/v1/spanner_database_admin.proto index 36e06f1e1f0..d41a4114c20 100644 --- 
a/proto-google-cloud-spanner-admin-database-v1/src/main/proto/google/spanner/admin/database/v1/spanner_database_admin.proto +++ b/proto-google-cloud-spanner-admin-database-v1/src/main/proto/google/spanner/admin/database/v1/spanner_database_admin.proto @@ -27,6 +27,7 @@ import "google/protobuf/empty.proto"; import "google/protobuf/field_mask.proto"; import "google/protobuf/struct.proto"; import "google/protobuf/timestamp.proto"; +import "google/rpc/status.proto"; import "google/spanner/admin/database/v1/backup.proto"; import "google/spanner/admin/database/v1/backup_schedule.proto"; import "google/spanner/admin/database/v1/common.proto"; @@ -485,6 +486,13 @@ service DatabaseAdmin { }; option (google.api.method_signature) = "parent"; } + + // This is an internal API called by Spanner Graph jobs. You should never need + // to call this API directly. + rpc InternalUpdateGraphOperation(InternalUpdateGraphOperationRequest) + returns (InternalUpdateGraphOperationResponse) { + option (google.api.method_signature) = "database,operation_id"; + } } // Information about the database restore. @@ -1282,3 +1290,25 @@ message SplitPoints { google.protobuf.Timestamp expire_time = 5 [(google.api.field_behavior) = OPTIONAL]; } + +// Internal request proto, do not use directly. +message InternalUpdateGraphOperationRequest { + // Internal field, do not use directly. + string database = 1 [ + (google.api.field_behavior) = REQUIRED, + (google.api.resource_reference) = { + type: "spanner.googleapis.com/Database" + } + ]; + // Internal field, do not use directly. + string operation_id = 2 [(google.api.field_behavior) = REQUIRED]; + // Internal field, do not use directly. + string vm_identity_token = 5 [(google.api.field_behavior) = REQUIRED]; + // Internal field, do not use directly. + double progress = 3 [(google.api.field_behavior) = OPTIONAL]; + // Internal field, do not use directly. 
+ google.rpc.Status status = 6 [(google.api.field_behavior) = OPTIONAL]; +} + +// Internal response proto, do not use directly. +message InternalUpdateGraphOperationResponse {} diff --git a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/CommitResponse.java b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/CommitResponse.java index c80fd37e814..7c804c12c31 100644 --- a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/CommitResponse.java +++ b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/CommitResponse.java @@ -746,7 +746,7 @@ public com.google.protobuf.TimestampOrBuilder getCommitTimestampOrBuilder() { * * *
-   * The statistics about this Commit. Not returned by default.
+   * The statistics about this `Commit`. Not returned by default.
    * For more information, see
    * [CommitRequest.return_commit_stats][google.spanner.v1.CommitRequest.return_commit_stats].
    * 
@@ -764,7 +764,7 @@ public boolean hasCommitStats() { * * *
-   * The statistics about this Commit. Not returned by default.
+   * The statistics about this `Commit`. Not returned by default.
    * For more information, see
    * [CommitRequest.return_commit_stats][google.spanner.v1.CommitRequest.return_commit_stats].
    * 
@@ -784,7 +784,7 @@ public com.google.spanner.v1.CommitResponse.CommitStats getCommitStats() { * * *
-   * The statistics about this Commit. Not returned by default.
+   * The statistics about this `Commit`. Not returned by default.
    * For more information, see
    * [CommitRequest.return_commit_stats][google.spanner.v1.CommitRequest.return_commit_stats].
    * 
@@ -805,7 +805,7 @@ public com.google.spanner.v1.CommitResponse.CommitStatsOrBuilder getCommitStatsO * *
    * If specified, transaction has not committed yet.
-   * Clients must retry the commit with the new precommit token.
+   * You must retry the commit with the new precommit token.
    * 
* * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 4; @@ -822,7 +822,7 @@ public boolean hasPrecommitToken() { * *
    * If specified, transaction has not committed yet.
-   * Clients must retry the commit with the new precommit token.
+   * You must retry the commit with the new precommit token.
    * 
* * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 4; @@ -842,7 +842,7 @@ public com.google.spanner.v1.MultiplexedSessionPrecommitToken getPrecommitToken( * *
    * If specified, transaction has not committed yet.
-   * Clients must retry the commit with the new precommit token.
+   * You must retry the commit with the new precommit token.
    * 
* * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 4; @@ -856,6 +856,68 @@ public com.google.spanner.v1.MultiplexedSessionPrecommitToken getPrecommitToken( return com.google.spanner.v1.MultiplexedSessionPrecommitToken.getDefaultInstance(); } + public static final int SNAPSHOT_TIMESTAMP_FIELD_NUMBER = 5; + private com.google.protobuf.Timestamp snapshotTimestamp_; + + /** + * + * + *
+   * If `TransactionOptions.isolation_level` is set to
+   * `IsolationLevel.REPEATABLE_READ`, then the snapshot timestamp is the
+   * timestamp at which all reads in the transaction ran. This timestamp is
+   * never returned.
+   * 
+ * + * .google.protobuf.Timestamp snapshot_timestamp = 5; + * + * @return Whether the snapshotTimestamp field is set. + */ + @java.lang.Override + public boolean hasSnapshotTimestamp() { + return ((bitField0_ & 0x00000004) != 0); + } + + /** + * + * + *
+   * If `TransactionOptions.isolation_level` is set to
+   * `IsolationLevel.REPEATABLE_READ`, then the snapshot timestamp is the
+   * timestamp at which all reads in the transaction ran. This timestamp is
+   * never returned.
+   * 
+ * + * .google.protobuf.Timestamp snapshot_timestamp = 5; + * + * @return The snapshotTimestamp. + */ + @java.lang.Override + public com.google.protobuf.Timestamp getSnapshotTimestamp() { + return snapshotTimestamp_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : snapshotTimestamp_; + } + + /** + * + * + *
+   * If `TransactionOptions.isolation_level` is set to
+   * `IsolationLevel.REPEATABLE_READ`, then the snapshot timestamp is the
+   * timestamp at which all reads in the transaction ran. This timestamp is
+   * never returned.
+   * 
+ * + * .google.protobuf.Timestamp snapshot_timestamp = 5; + */ + @java.lang.Override + public com.google.protobuf.TimestampOrBuilder getSnapshotTimestampOrBuilder() { + return snapshotTimestamp_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : snapshotTimestamp_; + } + private byte memoizedIsInitialized = -1; @java.lang.Override @@ -880,6 +942,9 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io output.writeMessage( 4, (com.google.spanner.v1.MultiplexedSessionPrecommitToken) multiplexedSessionRetry_); } + if (((bitField0_ & 0x00000004) != 0)) { + output.writeMessage(5, getSnapshotTimestamp()); + } getUnknownFields().writeTo(output); } @@ -900,6 +965,9 @@ public int getSerializedSize() { com.google.protobuf.CodedOutputStream.computeMessageSize( 4, (com.google.spanner.v1.MultiplexedSessionPrecommitToken) multiplexedSessionRetry_); } + if (((bitField0_ & 0x00000004) != 0)) { + size += com.google.protobuf.CodedOutputStream.computeMessageSize(5, getSnapshotTimestamp()); + } size += getUnknownFields().getSerializedSize(); memoizedSize = size; return size; @@ -923,6 +991,10 @@ public boolean equals(final java.lang.Object obj) { if (hasCommitStats()) { if (!getCommitStats().equals(other.getCommitStats())) return false; } + if (hasSnapshotTimestamp() != other.hasSnapshotTimestamp()) return false; + if (hasSnapshotTimestamp()) { + if (!getSnapshotTimestamp().equals(other.getSnapshotTimestamp())) return false; + } if (!getMultiplexedSessionRetryCase().equals(other.getMultiplexedSessionRetryCase())) return false; switch (multiplexedSessionRetryCase_) { @@ -951,6 +1023,10 @@ public int hashCode() { hash = (37 * hash) + COMMIT_STATS_FIELD_NUMBER; hash = (53 * hash) + getCommitStats().hashCode(); } + if (hasSnapshotTimestamp()) { + hash = (37 * hash) + SNAPSHOT_TIMESTAMP_FIELD_NUMBER; + hash = (53 * hash) + getSnapshotTimestamp().hashCode(); + } switch (multiplexedSessionRetryCase_) { case 4: hash = (37 * hash) + 
PRECOMMIT_TOKEN_FIELD_NUMBER; @@ -1101,6 +1177,7 @@ private void maybeForceBuilderInitialization() { if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) { getCommitTimestampFieldBuilder(); getCommitStatsFieldBuilder(); + getSnapshotTimestampFieldBuilder(); } } @@ -1121,6 +1198,11 @@ public Builder clear() { if (precommitTokenBuilder_ != null) { precommitTokenBuilder_.clear(); } + snapshotTimestamp_ = null; + if (snapshotTimestampBuilder_ != null) { + snapshotTimestampBuilder_.dispose(); + snapshotTimestampBuilder_ = null; + } multiplexedSessionRetryCase_ = 0; multiplexedSessionRetry_ = null; return this; @@ -1170,6 +1252,13 @@ private void buildPartial0(com.google.spanner.v1.CommitResponse result) { commitStatsBuilder_ == null ? commitStats_ : commitStatsBuilder_.build(); to_bitField0_ |= 0x00000002; } + if (((from_bitField0_ & 0x00000008) != 0)) { + result.snapshotTimestamp_ = + snapshotTimestampBuilder_ == null + ? snapshotTimestamp_ + : snapshotTimestampBuilder_.build(); + to_bitField0_ |= 0x00000004; + } result.bitField0_ |= to_bitField0_; } @@ -1232,6 +1321,9 @@ public Builder mergeFrom(com.google.spanner.v1.CommitResponse other) { if (other.hasCommitStats()) { mergeCommitStats(other.getCommitStats()); } + if (other.hasSnapshotTimestamp()) { + mergeSnapshotTimestamp(other.getSnapshotTimestamp()); + } switch (other.getMultiplexedSessionRetryCase()) { case PRECOMMIT_TOKEN: { @@ -1287,6 +1379,13 @@ public Builder mergeFrom( multiplexedSessionRetryCase_ = 4; break; } // case 34 + case 42: + { + input.readMessage( + getSnapshotTimestampFieldBuilder().getBuilder(), extensionRegistry); + bitField0_ |= 0x00000008; + break; + } // case 42 default: { if (!super.parseUnknownField(input, extensionRegistry, tag)) { @@ -1525,7 +1624,7 @@ public com.google.protobuf.TimestampOrBuilder getCommitTimestampOrBuilder() { * * *
-     * The statistics about this Commit. Not returned by default.
+     * The statistics about this `Commit`. Not returned by default.
      * For more information, see
      * [CommitRequest.return_commit_stats][google.spanner.v1.CommitRequest.return_commit_stats].
      * 
@@ -1542,7 +1641,7 @@ public boolean hasCommitStats() { * * *
-     * The statistics about this Commit. Not returned by default.
+     * The statistics about this `Commit`. Not returned by default.
      * For more information, see
      * [CommitRequest.return_commit_stats][google.spanner.v1.CommitRequest.return_commit_stats].
      * 
@@ -1565,7 +1664,7 @@ public com.google.spanner.v1.CommitResponse.CommitStats getCommitStats() { * * *
-     * The statistics about this Commit. Not returned by default.
+     * The statistics about this `Commit`. Not returned by default.
      * For more information, see
      * [CommitRequest.return_commit_stats][google.spanner.v1.CommitRequest.return_commit_stats].
      * 
@@ -1590,7 +1689,7 @@ public Builder setCommitStats(com.google.spanner.v1.CommitResponse.CommitStats v * * *
-     * The statistics about this Commit. Not returned by default.
+     * The statistics about this `Commit`. Not returned by default.
      * For more information, see
      * [CommitRequest.return_commit_stats][google.spanner.v1.CommitRequest.return_commit_stats].
      * 
@@ -1613,7 +1712,7 @@ public Builder setCommitStats( * * *
-     * The statistics about this Commit. Not returned by default.
+     * The statistics about this `Commit`. Not returned by default.
      * For more information, see
      * [CommitRequest.return_commit_stats][google.spanner.v1.CommitRequest.return_commit_stats].
      * 
@@ -1644,7 +1743,7 @@ public Builder mergeCommitStats(com.google.spanner.v1.CommitResponse.CommitStats * * *
-     * The statistics about this Commit. Not returned by default.
+     * The statistics about this `Commit`. Not returned by default.
      * For more information, see
      * [CommitRequest.return_commit_stats][google.spanner.v1.CommitRequest.return_commit_stats].
      * 
@@ -1666,7 +1765,7 @@ public Builder clearCommitStats() { * * *
-     * The statistics about this Commit. Not returned by default.
+     * The statistics about this `Commit`. Not returned by default.
      * For more information, see
      * [CommitRequest.return_commit_stats][google.spanner.v1.CommitRequest.return_commit_stats].
      * 
@@ -1683,7 +1782,7 @@ public com.google.spanner.v1.CommitResponse.CommitStats.Builder getCommitStatsBu * * *
-     * The statistics about this Commit. Not returned by default.
+     * The statistics about this `Commit`. Not returned by default.
      * For more information, see
      * [CommitRequest.return_commit_stats][google.spanner.v1.CommitRequest.return_commit_stats].
      * 
@@ -1704,7 +1803,7 @@ public com.google.spanner.v1.CommitResponse.CommitStatsOrBuilder getCommitStatsO * * *
-     * The statistics about this Commit. Not returned by default.
+     * The statistics about this `Commit`. Not returned by default.
      * For more information, see
      * [CommitRequest.return_commit_stats][google.spanner.v1.CommitRequest.return_commit_stats].
      * 
@@ -1739,7 +1838,7 @@ public com.google.spanner.v1.CommitResponse.CommitStatsOrBuilder getCommitStatsO * *
      * If specified, transaction has not committed yet.
-     * Clients must retry the commit with the new precommit token.
+     * You must retry the commit with the new precommit token.
      * 
* * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 4; @@ -1756,7 +1855,7 @@ public boolean hasPrecommitToken() { * *
      * If specified, transaction has not committed yet.
-     * Clients must retry the commit with the new precommit token.
+     * You must retry the commit with the new precommit token.
      * 
* * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 4; @@ -1783,7 +1882,7 @@ public com.google.spanner.v1.MultiplexedSessionPrecommitToken getPrecommitToken( * *
      * If specified, transaction has not committed yet.
-     * Clients must retry the commit with the new precommit token.
+     * You must retry the commit with the new precommit token.
      * 
* * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 4; @@ -1807,7 +1906,7 @@ public Builder setPrecommitToken(com.google.spanner.v1.MultiplexedSessionPrecomm * *
      * If specified, transaction has not committed yet.
-     * Clients must retry the commit with the new precommit token.
+     * You must retry the commit with the new precommit token.
      * 
* * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 4; @@ -1829,7 +1928,7 @@ public Builder setPrecommitToken( * *
      * If specified, transaction has not committed yet.
-     * Clients must retry the commit with the new precommit token.
+     * You must retry the commit with the new precommit token.
      * 
* * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 4; @@ -1866,7 +1965,7 @@ public Builder mergePrecommitToken( * *
      * If specified, transaction has not committed yet.
-     * Clients must retry the commit with the new precommit token.
+     * You must retry the commit with the new precommit token.
      * 
* * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 4; @@ -1893,7 +1992,7 @@ public Builder clearPrecommitToken() { * *
      * If specified, transaction has not committed yet.
-     * Clients must retry the commit with the new precommit token.
+     * You must retry the commit with the new precommit token.
      * 
* * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 4; @@ -1908,7 +2007,7 @@ public Builder clearPrecommitToken() { * *
      * If specified, transaction has not committed yet.
-     * Clients must retry the commit with the new precommit token.
+     * You must retry the commit with the new precommit token.
      * 
* * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 4; @@ -1931,7 +2030,7 @@ public Builder clearPrecommitToken() { * *
      * If specified, transaction has not committed yet.
-     * Clients must retry the commit with the new precommit token.
+     * You must retry the commit with the new precommit token.
      * 
* * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 4; @@ -1961,6 +2060,227 @@ public Builder clearPrecommitToken() { return precommitTokenBuilder_; } + private com.google.protobuf.Timestamp snapshotTimestamp_; + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + snapshotTimestampBuilder_; + + /** + * + * + *
+     * If `TransactionOptions.isolation_level` is set to
+     * `IsolationLevel.REPEATABLE_READ`, then the snapshot timestamp is the
+     * timestamp at which all reads in the transaction ran. This timestamp is
+     * never returned.
+     * 
+ * + * .google.protobuf.Timestamp snapshot_timestamp = 5; + * + * @return Whether the snapshotTimestamp field is set. + */ + public boolean hasSnapshotTimestamp() { + return ((bitField0_ & 0x00000008) != 0); + } + + /** + * + * + *
+     * If `TransactionOptions.isolation_level` is set to
+     * `IsolationLevel.REPEATABLE_READ`, then the snapshot timestamp is the
+     * timestamp at which all reads in the transaction ran. This timestamp is
+     * never returned.
+     * 
+ * + * .google.protobuf.Timestamp snapshot_timestamp = 5; + * + * @return The snapshotTimestamp. + */ + public com.google.protobuf.Timestamp getSnapshotTimestamp() { + if (snapshotTimestampBuilder_ == null) { + return snapshotTimestamp_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : snapshotTimestamp_; + } else { + return snapshotTimestampBuilder_.getMessage(); + } + } + + /** + * + * + *
+     * If `TransactionOptions.isolation_level` is set to
+     * `IsolationLevel.REPEATABLE_READ`, then the snapshot timestamp is the
+     * timestamp at which all reads in the transaction ran. This timestamp is
+     * never returned.
+     * 
+ * + * .google.protobuf.Timestamp snapshot_timestamp = 5; + */ + public Builder setSnapshotTimestamp(com.google.protobuf.Timestamp value) { + if (snapshotTimestampBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + snapshotTimestamp_ = value; + } else { + snapshotTimestampBuilder_.setMessage(value); + } + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
+     * If `TransactionOptions.isolation_level` is set to
+     * `IsolationLevel.REPEATABLE_READ`, then the snapshot timestamp is the
+     * timestamp at which all reads in the transaction ran. This timestamp is
+     * never returned.
+     * 
+ * + * .google.protobuf.Timestamp snapshot_timestamp = 5; + */ + public Builder setSnapshotTimestamp(com.google.protobuf.Timestamp.Builder builderForValue) { + if (snapshotTimestampBuilder_ == null) { + snapshotTimestamp_ = builderForValue.build(); + } else { + snapshotTimestampBuilder_.setMessage(builderForValue.build()); + } + bitField0_ |= 0x00000008; + onChanged(); + return this; + } + + /** + * + * + *
+     * If `TransactionOptions.isolation_level` is set to
+     * `IsolationLevel.REPEATABLE_READ`, then the snapshot timestamp is the
+     * timestamp at which all reads in the transaction ran. This timestamp is
+     * never returned.
+     * 
+ * + * .google.protobuf.Timestamp snapshot_timestamp = 5; + */ + public Builder mergeSnapshotTimestamp(com.google.protobuf.Timestamp value) { + if (snapshotTimestampBuilder_ == null) { + if (((bitField0_ & 0x00000008) != 0) + && snapshotTimestamp_ != null + && snapshotTimestamp_ != com.google.protobuf.Timestamp.getDefaultInstance()) { + getSnapshotTimestampBuilder().mergeFrom(value); + } else { + snapshotTimestamp_ = value; + } + } else { + snapshotTimestampBuilder_.mergeFrom(value); + } + if (snapshotTimestamp_ != null) { + bitField0_ |= 0x00000008; + onChanged(); + } + return this; + } + + /** + * + * + *
+     * If `TransactionOptions.isolation_level` is set to
+     * `IsolationLevel.REPEATABLE_READ`, then the snapshot timestamp is the
+     * timestamp at which all reads in the transaction ran. This timestamp is
+     * never returned.
+     * 
+ * + * .google.protobuf.Timestamp snapshot_timestamp = 5; + */ + public Builder clearSnapshotTimestamp() { + bitField0_ = (bitField0_ & ~0x00000008); + snapshotTimestamp_ = null; + if (snapshotTimestampBuilder_ != null) { + snapshotTimestampBuilder_.dispose(); + snapshotTimestampBuilder_ = null; + } + onChanged(); + return this; + } + + /** + * + * + *
+     * If `TransactionOptions.isolation_level` is set to
+     * `IsolationLevel.REPEATABLE_READ`, then the snapshot timestamp is the
+     * timestamp at which all reads in the transaction ran. This timestamp is
+     * never returned.
+     * 
+ * + * .google.protobuf.Timestamp snapshot_timestamp = 5; + */ + public com.google.protobuf.Timestamp.Builder getSnapshotTimestampBuilder() { + bitField0_ |= 0x00000008; + onChanged(); + return getSnapshotTimestampFieldBuilder().getBuilder(); + } + + /** + * + * + *
+     * If `TransactionOptions.isolation_level` is set to
+     * `IsolationLevel.REPEATABLE_READ`, then the snapshot timestamp is the
+     * timestamp at which all reads in the transaction ran. This timestamp is
+     * never returned.
+     * 
+ * + * .google.protobuf.Timestamp snapshot_timestamp = 5; + */ + public com.google.protobuf.TimestampOrBuilder getSnapshotTimestampOrBuilder() { + if (snapshotTimestampBuilder_ != null) { + return snapshotTimestampBuilder_.getMessageOrBuilder(); + } else { + return snapshotTimestamp_ == null + ? com.google.protobuf.Timestamp.getDefaultInstance() + : snapshotTimestamp_; + } + } + + /** + * + * + *
+     * If `TransactionOptions.isolation_level` is set to
+     * `IsolationLevel.REPEATABLE_READ`, then the snapshot timestamp is the
+     * timestamp at which all reads in the transaction ran. This timestamp is
+     * never returned.
+     * 
+ * + * .google.protobuf.Timestamp snapshot_timestamp = 5; + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder> + getSnapshotTimestampFieldBuilder() { + if (snapshotTimestampBuilder_ == null) { + snapshotTimestampBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.protobuf.Timestamp, + com.google.protobuf.Timestamp.Builder, + com.google.protobuf.TimestampOrBuilder>( + getSnapshotTimestamp(), getParentForChildren(), isClean()); + snapshotTimestamp_ = null; + } + return snapshotTimestampBuilder_; + } + @java.lang.Override public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); diff --git a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/CommitResponseOrBuilder.java b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/CommitResponseOrBuilder.java index 7319b6760c0..9b8eccd6b47 100644 --- a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/CommitResponseOrBuilder.java +++ b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/CommitResponseOrBuilder.java @@ -65,7 +65,7 @@ public interface CommitResponseOrBuilder * * *
-   * The statistics about this Commit. Not returned by default.
+   * The statistics about this `Commit`. Not returned by default.
    * For more information, see
    * [CommitRequest.return_commit_stats][google.spanner.v1.CommitRequest.return_commit_stats].
    * 
@@ -80,7 +80,7 @@ public interface CommitResponseOrBuilder * * *
-   * The statistics about this Commit. Not returned by default.
+   * The statistics about this `Commit`. Not returned by default.
    * For more information, see
    * [CommitRequest.return_commit_stats][google.spanner.v1.CommitRequest.return_commit_stats].
    * 
@@ -95,7 +95,7 @@ public interface CommitResponseOrBuilder * * *
-   * The statistics about this Commit. Not returned by default.
+   * The statistics about this `Commit`. Not returned by default.
    * For more information, see
    * [CommitRequest.return_commit_stats][google.spanner.v1.CommitRequest.return_commit_stats].
    * 
@@ -109,7 +109,7 @@ public interface CommitResponseOrBuilder * *
    * If specified, transaction has not committed yet.
-   * Clients must retry the commit with the new precommit token.
+   * You must retry the commit with the new precommit token.
    * 
* * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 4; @@ -123,7 +123,7 @@ public interface CommitResponseOrBuilder * *
    * If specified, transaction has not committed yet.
-   * Clients must retry the commit with the new precommit token.
+   * You must retry the commit with the new precommit token.
    * 
* * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 4; @@ -137,12 +137,58 @@ public interface CommitResponseOrBuilder * *
    * If specified, transaction has not committed yet.
-   * Clients must retry the commit with the new precommit token.
+   * You must retry the commit with the new precommit token.
    * 
* * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 4; */ com.google.spanner.v1.MultiplexedSessionPrecommitTokenOrBuilder getPrecommitTokenOrBuilder(); + /** + * + * + *
+   * If `TransactionOptions.isolation_level` is set to
+   * `IsolationLevel.REPEATABLE_READ`, then the snapshot timestamp is the
+   * timestamp at which all reads in the transaction ran. This timestamp is
+   * never returned.
+   * 
+ * + * .google.protobuf.Timestamp snapshot_timestamp = 5; + * + * @return Whether the snapshotTimestamp field is set. + */ + boolean hasSnapshotTimestamp(); + + /** + * + * + *
+   * If `TransactionOptions.isolation_level` is set to
+   * `IsolationLevel.REPEATABLE_READ`, then the snapshot timestamp is the
+   * timestamp at which all reads in the transaction ran. This timestamp is
+   * never returned.
+   * 
+ * + * .google.protobuf.Timestamp snapshot_timestamp = 5; + * + * @return The snapshotTimestamp. + */ + com.google.protobuf.Timestamp getSnapshotTimestamp(); + + /** + * + * + *
+   * If `TransactionOptions.isolation_level` is set to
+   * `IsolationLevel.REPEATABLE_READ`, then the snapshot timestamp is the
+   * timestamp at which all reads in the transaction ran. This timestamp is
+   * never returned.
+   * 
+ * + * .google.protobuf.Timestamp snapshot_timestamp = 5; + */ + com.google.protobuf.TimestampOrBuilder getSnapshotTimestampOrBuilder(); + com.google.spanner.v1.CommitResponse.MultiplexedSessionRetryCase getMultiplexedSessionRetryCase(); } diff --git a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/CommitResponseProto.java b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/CommitResponseProto.java index d68e82f1c35..24191aa23eb 100644 --- a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/CommitResponseProto.java +++ b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/CommitResponseProto.java @@ -48,19 +48,20 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { "\n\'google/spanner/v1/commit_response.prot" + "o\022\021google.spanner.v1\032\037google/protobuf/ti" + "mestamp.proto\032#google/spanner/v1/transac" - + "tion.proto\"\235\002\n\016CommitResponse\0224\n\020commit_" + + "tion.proto\"\325\002\n\016CommitResponse\0224\n\020commit_" + "timestamp\030\001 \001(\0132\032.google.protobuf.Timest" + "amp\022C\n\014commit_stats\030\002 \001(\0132-.google.spann" + "er.v1.CommitResponse.CommitStats\022N\n\017prec" + "ommit_token\030\004 \001(\01323.google.spanner.v1.Mu" - + "ltiplexedSessionPrecommitTokenH\000\032%\n\013Comm" - + "itStats\022\026\n\016mutation_count\030\001 \001(\003B\031\n\027Multi" - + "plexedSessionRetryB\266\001\n\025com.google.spanne" - + "r.v1B\023CommitResponseProtoP\001Z5cloud.googl" - + "e.com/go/spanner/apiv1/spannerpb;spanner" - + "pb\252\002\027Google.Cloud.Spanner.V1\312\002\027Google\\Cl" - + "oud\\Spanner\\V1\352\002\032Google::Cloud::Spanner:" - + ":V1b\006proto3" + + "ltiplexedSessionPrecommitTokenH\000\0226\n\022snap" + + "shot_timestamp\030\005 \001(\0132\032.google.protobuf.T" + + "imestamp\032%\n\013CommitStats\022\026\n\016mutation_coun" + + "t\030\001 \001(\003B\031\n\027MultiplexedSessionRetryB\266\001\n\025c" + + 
"om.google.spanner.v1B\023CommitResponseProt" + + "oP\001Z5cloud.google.com/go/spanner/apiv1/s" + + "pannerpb;spannerpb\252\002\027Google.Cloud.Spanne" + + "r.V1\312\002\027Google\\Cloud\\Spanner\\V1\352\002\032Google:" + + ":Cloud::Spanner::V1b\006proto3" }; descriptor = com.google.protobuf.Descriptors.FileDescriptor.internalBuildGeneratedFileFrom( @@ -75,7 +76,11 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_google_spanner_v1_CommitResponse_descriptor, new java.lang.String[] { - "CommitTimestamp", "CommitStats", "PrecommitToken", "MultiplexedSessionRetry", + "CommitTimestamp", + "CommitStats", + "PrecommitToken", + "SnapshotTimestamp", + "MultiplexedSessionRetry", }); internal_static_google_spanner_v1_CommitResponse_CommitStats_descriptor = internal_static_google_spanner_v1_CommitResponse_descriptor.getNestedTypes().get(0); diff --git a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/MultiplexedSessionPrecommitToken.java b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/MultiplexedSessionPrecommitToken.java index 5af42128eaf..cafcfb50614 100644 --- a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/MultiplexedSessionPrecommitToken.java +++ b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/MultiplexedSessionPrecommitToken.java @@ -25,8 +25,10 @@ *
  * When a read-write transaction is executed on a multiplexed session,
  * this precommit token is sent back to the client
- * as a part of the [Transaction] message in the BeginTransaction response and
- * also as a part of the [ResultSet] and [PartialResultSet] responses.
+ * as a part of the [Transaction][google.spanner.v1.Transaction] message in the
+ * [BeginTransaction][google.spanner.v1.BeginTransactionRequest] response and
+ * also as a part of the [ResultSet][google.spanner.v1.ResultSet] and
+ * [PartialResultSet][google.spanner.v1.PartialResultSet] responses.
  * 
* * Protobuf type {@code google.spanner.v1.MultiplexedSessionPrecommitToken} @@ -284,8 +286,10 @@ protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.Build *
    * When a read-write transaction is executed on a multiplexed session,
    * this precommit token is sent back to the client
-   * as a part of the [Transaction] message in the BeginTransaction response and
-   * also as a part of the [ResultSet] and [PartialResultSet] responses.
+   * as a part of the [Transaction][google.spanner.v1.Transaction] message in the
+   * [BeginTransaction][google.spanner.v1.BeginTransactionRequest] response and
+   * also as a part of the [ResultSet][google.spanner.v1.ResultSet] and
+   * [PartialResultSet][google.spanner.v1.PartialResultSet] responses.
    * 
* * Protobuf type {@code google.spanner.v1.MultiplexedSessionPrecommitToken} diff --git a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/Transaction.java b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/Transaction.java index 92b64c5873b..1745b741255 100644 --- a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/Transaction.java +++ b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/Transaction.java @@ -166,15 +166,13 @@ public com.google.protobuf.TimestampOrBuilder getReadTimestampOrBuilder() { * * *
-   * A precommit token will be included in the response of a BeginTransaction
+   * A precommit token is included in the response of a BeginTransaction
    * request if the read-write transaction is on a multiplexed session and
    * a mutation_key was specified in the
    * [BeginTransaction][google.spanner.v1.BeginTransactionRequest].
    * The precommit token with the highest sequence number from this transaction
    * attempt should be passed to the [Commit][google.spanner.v1.Spanner.Commit]
    * request for this transaction.
-   * This feature is not yet supported and will result in an UNIMPLEMENTED
-   * error.
    * 
* * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 3; @@ -190,15 +188,13 @@ public boolean hasPrecommitToken() { * * *
-   * A precommit token will be included in the response of a BeginTransaction
+   * A precommit token is included in the response of a BeginTransaction
    * request if the read-write transaction is on a multiplexed session and
    * a mutation_key was specified in the
    * [BeginTransaction][google.spanner.v1.BeginTransactionRequest].
    * The precommit token with the highest sequence number from this transaction
    * attempt should be passed to the [Commit][google.spanner.v1.Spanner.Commit]
    * request for this transaction.
-   * This feature is not yet supported and will result in an UNIMPLEMENTED
-   * error.
    * 
* * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 3; @@ -216,15 +212,13 @@ public com.google.spanner.v1.MultiplexedSessionPrecommitToken getPrecommitToken( * * *
-   * A precommit token will be included in the response of a BeginTransaction
+   * A precommit token is included in the response of a BeginTransaction
    * request if the read-write transaction is on a multiplexed session and
    * a mutation_key was specified in the
    * [BeginTransaction][google.spanner.v1.BeginTransactionRequest].
    * The precommit token with the highest sequence number from this transaction
    * attempt should be passed to the [Commit][google.spanner.v1.Spanner.Commit]
    * request for this transaction.
-   * This feature is not yet supported and will result in an UNIMPLEMENTED
-   * error.
    * 
* * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 3; @@ -981,15 +975,13 @@ public com.google.protobuf.TimestampOrBuilder getReadTimestampOrBuilder() { * * *
-     * A precommit token will be included in the response of a BeginTransaction
+     * A precommit token is included in the response of a BeginTransaction
      * request if the read-write transaction is on a multiplexed session and
      * a mutation_key was specified in the
      * [BeginTransaction][google.spanner.v1.BeginTransactionRequest].
      * The precommit token with the highest sequence number from this transaction
      * attempt should be passed to the [Commit][google.spanner.v1.Spanner.Commit]
      * request for this transaction.
-     * This feature is not yet supported and will result in an UNIMPLEMENTED
-     * error.
      * 
* * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 3; @@ -1004,15 +996,13 @@ public boolean hasPrecommitToken() { * * *
-     * A precommit token will be included in the response of a BeginTransaction
+     * A precommit token is included in the response of a BeginTransaction
      * request if the read-write transaction is on a multiplexed session and
      * a mutation_key was specified in the
      * [BeginTransaction][google.spanner.v1.BeginTransactionRequest].
      * The precommit token with the highest sequence number from this transaction
      * attempt should be passed to the [Commit][google.spanner.v1.Spanner.Commit]
      * request for this transaction.
-     * This feature is not yet supported and will result in an UNIMPLEMENTED
-     * error.
      * 
* * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 3; @@ -1033,15 +1023,13 @@ public com.google.spanner.v1.MultiplexedSessionPrecommitToken getPrecommitToken( * * *
-     * A precommit token will be included in the response of a BeginTransaction
+     * A precommit token is included in the response of a BeginTransaction
      * request if the read-write transaction is on a multiplexed session and
      * a mutation_key was specified in the
      * [BeginTransaction][google.spanner.v1.BeginTransactionRequest].
      * The precommit token with the highest sequence number from this transaction
      * attempt should be passed to the [Commit][google.spanner.v1.Spanner.Commit]
      * request for this transaction.
-     * This feature is not yet supported and will result in an UNIMPLEMENTED
-     * error.
      * 
* * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 3; @@ -1064,15 +1052,13 @@ public Builder setPrecommitToken(com.google.spanner.v1.MultiplexedSessionPrecomm * * *
-     * A precommit token will be included in the response of a BeginTransaction
+     * A precommit token is included in the response of a BeginTransaction
      * request if the read-write transaction is on a multiplexed session and
      * a mutation_key was specified in the
      * [BeginTransaction][google.spanner.v1.BeginTransactionRequest].
      * The precommit token with the highest sequence number from this transaction
      * attempt should be passed to the [Commit][google.spanner.v1.Spanner.Commit]
      * request for this transaction.
-     * This feature is not yet supported and will result in an UNIMPLEMENTED
-     * error.
      * 
* * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 3; @@ -1093,15 +1079,13 @@ public Builder setPrecommitToken( * * *
-     * A precommit token will be included in the response of a BeginTransaction
+     * A precommit token is included in the response of a BeginTransaction
      * request if the read-write transaction is on a multiplexed session and
      * a mutation_key was specified in the
      * [BeginTransaction][google.spanner.v1.BeginTransactionRequest].
      * The precommit token with the highest sequence number from this transaction
      * attempt should be passed to the [Commit][google.spanner.v1.Spanner.Commit]
      * request for this transaction.
-     * This feature is not yet supported and will result in an UNIMPLEMENTED
-     * error.
      * 
* * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 3; @@ -1131,15 +1115,13 @@ public Builder mergePrecommitToken( * * *
-     * A precommit token will be included in the response of a BeginTransaction
+     * A precommit token is included in the response of a BeginTransaction
      * request if the read-write transaction is on a multiplexed session and
      * a mutation_key was specified in the
      * [BeginTransaction][google.spanner.v1.BeginTransactionRequest].
      * The precommit token with the highest sequence number from this transaction
      * attempt should be passed to the [Commit][google.spanner.v1.Spanner.Commit]
      * request for this transaction.
-     * This feature is not yet supported and will result in an UNIMPLEMENTED
-     * error.
      * 
* * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 3; @@ -1159,15 +1141,13 @@ public Builder clearPrecommitToken() { * * *
-     * A precommit token will be included in the response of a BeginTransaction
+     * A precommit token is included in the response of a BeginTransaction
      * request if the read-write transaction is on a multiplexed session and
      * a mutation_key was specified in the
      * [BeginTransaction][google.spanner.v1.BeginTransactionRequest].
      * The precommit token with the highest sequence number from this transaction
      * attempt should be passed to the [Commit][google.spanner.v1.Spanner.Commit]
      * request for this transaction.
-     * This feature is not yet supported and will result in an UNIMPLEMENTED
-     * error.
      * 
* * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 3; @@ -1183,15 +1163,13 @@ public Builder clearPrecommitToken() { * * *
-     * A precommit token will be included in the response of a BeginTransaction
+     * A precommit token is included in the response of a BeginTransaction
      * request if the read-write transaction is on a multiplexed session and
      * a mutation_key was specified in the
      * [BeginTransaction][google.spanner.v1.BeginTransactionRequest].
      * The precommit token with the highest sequence number from this transaction
      * attempt should be passed to the [Commit][google.spanner.v1.Spanner.Commit]
      * request for this transaction.
-     * This feature is not yet supported and will result in an UNIMPLEMENTED
-     * error.
      * 
* * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 3; @@ -1211,15 +1189,13 @@ public Builder clearPrecommitToken() { * * *
-     * A precommit token will be included in the response of a BeginTransaction
+     * A precommit token is included in the response of a BeginTransaction
      * request if the read-write transaction is on a multiplexed session and
      * a mutation_key was specified in the
      * [BeginTransaction][google.spanner.v1.BeginTransactionRequest].
      * The precommit token with the highest sequence number from this transaction
      * attempt should be passed to the [Commit][google.spanner.v1.Spanner.Commit]
      * request for this transaction.
-     * This feature is not yet supported and will result in an UNIMPLEMENTED
-     * error.
      * 
* * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 3; diff --git a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/TransactionOptions.java b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/TransactionOptions.java index e3c7e2989cd..96b7b31bc0d 100644 --- a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/TransactionOptions.java +++ b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/TransactionOptions.java @@ -23,330 +23,7 @@ * * *
- * Transactions:
- *
- * Each session can have at most one active transaction at a time (note that
- * standalone reads and queries use a transaction internally and do count
- * towards the one transaction limit). After the active transaction is
- * completed, the session can immediately be re-used for the next transaction.
- * It is not necessary to create a new session for each transaction.
- *
- * Transaction modes:
- *
- * Cloud Spanner supports three transaction modes:
- *
- *   1. Locking read-write. This type of transaction is the only way
- *      to write data into Cloud Spanner. These transactions rely on
- *      pessimistic locking and, if necessary, two-phase commit.
- *      Locking read-write transactions may abort, requiring the
- *      application to retry.
- *
- *   2. Snapshot read-only. Snapshot read-only transactions provide guaranteed
- *      consistency across several reads, but do not allow
- *      writes. Snapshot read-only transactions can be configured to read at
- *      timestamps in the past, or configured to perform a strong read
- *      (where Spanner will select a timestamp such that the read is
- *      guaranteed to see the effects of all transactions that have committed
- *      before the start of the read). Snapshot read-only transactions do not
- *      need to be committed.
- *
- *      Queries on change streams must be performed with the snapshot read-only
- *      transaction mode, specifying a strong read. Please see
- *      [TransactionOptions.ReadOnly.strong][google.spanner.v1.TransactionOptions.ReadOnly.strong]
- *      for more details.
- *
- *   3. Partitioned DML. This type of transaction is used to execute
- *      a single Partitioned DML statement. Partitioned DML partitions
- *      the key space and runs the DML statement over each partition
- *      in parallel using separate, internal transactions that commit
- *      independently. Partitioned DML transactions do not need to be
- *      committed.
- *
- * For transactions that only read, snapshot read-only transactions
- * provide simpler semantics and are almost always faster. In
- * particular, read-only transactions do not take locks, so they do
- * not conflict with read-write transactions. As a consequence of not
- * taking locks, they also do not abort, so retry loops are not needed.
- *
- * Transactions may only read-write data in a single database. They
- * may, however, read-write data in different tables within that
- * database.
- *
- * Locking read-write transactions:
- *
- * Locking transactions may be used to atomically read-modify-write
- * data anywhere in a database. This type of transaction is externally
- * consistent.
- *
- * Clients should attempt to minimize the amount of time a transaction
- * is active. Faster transactions commit with higher probability
- * and cause less contention. Cloud Spanner attempts to keep read locks
- * active as long as the transaction continues to do reads, and the
- * transaction has not been terminated by
- * [Commit][google.spanner.v1.Spanner.Commit] or
- * [Rollback][google.spanner.v1.Spanner.Rollback]. Long periods of
- * inactivity at the client may cause Cloud Spanner to release a
- * transaction's locks and abort it.
- *
- * Conceptually, a read-write transaction consists of zero or more
- * reads or SQL statements followed by
- * [Commit][google.spanner.v1.Spanner.Commit]. At any time before
- * [Commit][google.spanner.v1.Spanner.Commit], the client can send a
- * [Rollback][google.spanner.v1.Spanner.Rollback] request to abort the
- * transaction.
- *
- * Semantics:
- *
- * Cloud Spanner can commit the transaction if all read locks it acquired
- * are still valid at commit time, and it is able to acquire write
- * locks for all writes. Cloud Spanner can abort the transaction for any
- * reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees
- * that the transaction has not modified any user data in Cloud Spanner.
- *
- * Unless the transaction commits, Cloud Spanner makes no guarantees about
- * how long the transaction's locks were held for. It is an error to
- * use Cloud Spanner locks for any sort of mutual exclusion other than
- * between Cloud Spanner transactions themselves.
- *
- * Retrying aborted transactions:
- *
- * When a transaction aborts, the application can choose to retry the
- * whole transaction again. To maximize the chances of successfully
- * committing the retry, the client should execute the retry in the
- * same session as the original attempt. The original session's lock
- * priority increases with each consecutive abort, meaning that each
- * attempt has a slightly better chance of success than the previous.
- *
- * Under some circumstances (for example, many transactions attempting to
- * modify the same row(s)), a transaction can abort many times in a
- * short period before successfully committing. Thus, it is not a good
- * idea to cap the number of retries a transaction can attempt;
- * instead, it is better to limit the total amount of time spent
- * retrying.
- *
- * Idle transactions:
- *
- * A transaction is considered idle if it has no outstanding reads or
- * SQL queries and has not started a read or SQL query within the last 10
- * seconds. Idle transactions can be aborted by Cloud Spanner so that they
- * don't hold on to locks indefinitely. If an idle transaction is aborted, the
- * commit will fail with error `ABORTED`.
- *
- * If this behavior is undesirable, periodically executing a simple
- * SQL query in the transaction (for example, `SELECT 1`) prevents the
- * transaction from becoming idle.
- *
- * Snapshot read-only transactions:
- *
- * Snapshot read-only transactions provides a simpler method than
- * locking read-write transactions for doing several consistent
- * reads. However, this type of transaction does not support writes.
- *
- * Snapshot transactions do not take locks. Instead, they work by
- * choosing a Cloud Spanner timestamp, then executing all reads at that
- * timestamp. Since they do not acquire locks, they do not block
- * concurrent read-write transactions.
- *
- * Unlike locking read-write transactions, snapshot read-only
- * transactions never abort. They can fail if the chosen read
- * timestamp is garbage collected; however, the default garbage
- * collection policy is generous enough that most applications do not
- * need to worry about this in practice.
- *
- * Snapshot read-only transactions do not need to call
- * [Commit][google.spanner.v1.Spanner.Commit] or
- * [Rollback][google.spanner.v1.Spanner.Rollback] (and in fact are not
- * permitted to do so).
- *
- * To execute a snapshot transaction, the client specifies a timestamp
- * bound, which tells Cloud Spanner how to choose a read timestamp.
- *
- * The types of timestamp bound are:
- *
- *   - Strong (the default).
- *   - Bounded staleness.
- *   - Exact staleness.
- *
- * If the Cloud Spanner database to be read is geographically distributed,
- * stale read-only transactions can execute more quickly than strong
- * or read-write transactions, because they are able to execute far
- * from the leader replica.
- *
- * Each type of timestamp bound is discussed in detail below.
- *
- * Strong: Strong reads are guaranteed to see the effects of all transactions
- * that have committed before the start of the read. Furthermore, all
- * rows yielded by a single read are consistent with each other -- if
- * any part of the read observes a transaction, all parts of the read
- * see the transaction.
- *
- * Strong reads are not repeatable: two consecutive strong read-only
- * transactions might return inconsistent results if there are
- * concurrent writes. If consistency across reads is required, the
- * reads should be executed within a transaction or at an exact read
- * timestamp.
- *
- * Queries on change streams (see below for more details) must also specify
- * the strong read timestamp bound.
- *
- * See
- * [TransactionOptions.ReadOnly.strong][google.spanner.v1.TransactionOptions.ReadOnly.strong].
- *
- * Exact staleness:
- *
- * These timestamp bounds execute reads at a user-specified
- * timestamp. Reads at a timestamp are guaranteed to see a consistent
- * prefix of the global transaction history: they observe
- * modifications done by all transactions with a commit timestamp less than or
- * equal to the read timestamp, and observe none of the modifications done by
- * transactions with a larger commit timestamp. They will block until
- * all conflicting transactions that may be assigned commit timestamps
- * <= the read timestamp have finished.
- *
- * The timestamp can either be expressed as an absolute Cloud Spanner commit
- * timestamp or a staleness relative to the current time.
- *
- * These modes do not require a "negotiation phase" to pick a
- * timestamp. As a result, they execute slightly faster than the
- * equivalent boundedly stale concurrency modes. On the other hand,
- * boundedly stale reads usually return fresher results.
- *
- * See
- * [TransactionOptions.ReadOnly.read_timestamp][google.spanner.v1.TransactionOptions.ReadOnly.read_timestamp]
- * and
- * [TransactionOptions.ReadOnly.exact_staleness][google.spanner.v1.TransactionOptions.ReadOnly.exact_staleness].
- *
- * Bounded staleness:
- *
- * Bounded staleness modes allow Cloud Spanner to pick the read timestamp,
- * subject to a user-provided staleness bound. Cloud Spanner chooses the
- * newest timestamp within the staleness bound that allows execution
- * of the reads at the closest available replica without blocking.
- *
- * All rows yielded are consistent with each other -- if any part of
- * the read observes a transaction, all parts of the read see the
- * transaction. Boundedly stale reads are not repeatable: two stale
- * reads, even if they use the same staleness bound, can execute at
- * different timestamps and thus return inconsistent results.
- *
- * Boundedly stale reads execute in two phases: the first phase
- * negotiates a timestamp among all replicas needed to serve the
- * read. In the second phase, reads are executed at the negotiated
- * timestamp.
- *
- * As a result of the two phase execution, bounded staleness reads are
- * usually a little slower than comparable exact staleness
- * reads. However, they are typically able to return fresher
- * results, and are more likely to execute at the closest replica.
- *
- * Because the timestamp negotiation requires up-front knowledge of
- * which rows will be read, it can only be used with single-use
- * read-only transactions.
- *
- * See
- * [TransactionOptions.ReadOnly.max_staleness][google.spanner.v1.TransactionOptions.ReadOnly.max_staleness]
- * and
- * [TransactionOptions.ReadOnly.min_read_timestamp][google.spanner.v1.TransactionOptions.ReadOnly.min_read_timestamp].
- *
- * Old read timestamps and garbage collection:
- *
- * Cloud Spanner continuously garbage collects deleted and overwritten data
- * in the background to reclaim storage space. This process is known
- * as "version GC". By default, version GC reclaims versions after they
- * are one hour old. Because of this, Cloud Spanner cannot perform reads
- * at read timestamps more than one hour in the past. This
- * restriction also applies to in-progress reads and/or SQL queries whose
- * timestamp become too old while executing. Reads and SQL queries with
- * too-old read timestamps fail with the error `FAILED_PRECONDITION`.
- *
- * You can configure and extend the `VERSION_RETENTION_PERIOD` of a
- * database up to a period as long as one week, which allows Cloud Spanner
- * to perform reads up to one week in the past.
- *
- * Querying change Streams:
- *
- * A Change Stream is a schema object that can be configured to watch data
- * changes on the entire database, a set of tables, or a set of columns
- * in a database.
- *
- * When a change stream is created, Spanner automatically defines a
- * corresponding SQL Table-Valued Function (TVF) that can be used to query
- * the change records in the associated change stream using the
- * ExecuteStreamingSql API. The name of the TVF for a change stream is
- * generated from the name of the change stream: READ_<change_stream_name>.
- *
- * All queries on change stream TVFs must be executed using the
- * ExecuteStreamingSql API with a single-use read-only transaction with a
- * strong read-only timestamp_bound. The change stream TVF allows users to
- * specify the start_timestamp and end_timestamp for the time range of
- * interest. All change records within the retention period is accessible
- * using the strong read-only timestamp_bound. All other TransactionOptions
- * are invalid for change stream queries.
- *
- * In addition, if TransactionOptions.read_only.return_read_timestamp is set
- * to true, a special value of 2^63 - 2 will be returned in the
- * [Transaction][google.spanner.v1.Transaction] message that describes the
- * transaction, instead of a valid read timestamp. This special value should be
- * discarded and not used for any subsequent queries.
- *
- * Please see https://cloud.google.com/spanner/docs/change-streams
- * for more details on how to query the change stream TVFs.
- *
- * Partitioned DML transactions:
- *
- * Partitioned DML transactions are used to execute DML statements with a
- * different execution strategy that provides different, and often better,
- * scalability properties for large, table-wide operations than DML in a
- * ReadWrite transaction. Smaller scoped statements, such as an OLTP workload,
- * should prefer using ReadWrite transactions.
- *
- * Partitioned DML partitions the keyspace and runs the DML statement on each
- * partition in separate, internal transactions. These transactions commit
- * automatically when complete, and run independently from one another.
- *
- * To reduce lock contention, this execution strategy only acquires read locks
- * on rows that match the WHERE clause of the statement. Additionally, the
- * smaller per-partition transactions hold locks for less time.
- *
- * That said, Partitioned DML is not a drop-in replacement for standard DML used
- * in ReadWrite transactions.
- *
- *  - The DML statement must be fully-partitionable. Specifically, the statement
- *    must be expressible as the union of many statements which each access only
- *    a single row of the table.
- *
- *  - The statement is not applied atomically to all rows of the table. Rather,
- *    the statement is applied atomically to partitions of the table, in
- *    independent transactions. Secondary index rows are updated atomically
- *    with the base table rows.
- *
- *  - Partitioned DML does not guarantee exactly-once execution semantics
- *    against a partition. The statement will be applied at least once to each
- *    partition. It is strongly recommended that the DML statement should be
- *    idempotent to avoid unexpected results. For instance, it is potentially
- *    dangerous to run a statement such as
- *    `UPDATE table SET column = column + 1` as it could be run multiple times
- *    against some rows.
- *
- *  - The partitions are committed automatically - there is no support for
- *    Commit or Rollback. If the call returns an error, or if the client issuing
- *    the ExecuteSql call dies, it is possible that some rows had the statement
- *    executed on them successfully. It is also possible that statement was
- *    never executed against other rows.
- *
- *  - Partitioned DML transactions may only contain the execution of a single
- *    DML statement via ExecuteSql or ExecuteStreamingSql.
- *
- *  - If any error is encountered during the execution of the partitioned DML
- *    operation (for instance, a UNIQUE INDEX violation, division by zero, or a
- *    value that cannot be stored due to schema constraints), then the
- *    operation is stopped at that point and an error is returned. It is
- *    possible that at this point, some partitions have been committed (or even
- *    committed multiple times), and other partitions have not been run at all.
- *
- * Given the above, Partitioned DML is good fit for large, database-wide,
- * operations that are idempotent, such as deleting old rows from a very large
- * table.
+ * Options to use for transactions.
  * 
* * Protobuf type {@code google.spanner.v1.TransactionOptions} @@ -431,9 +108,9 @@ public enum IsolationLevel implements com.google.protobuf.ProtocolMessageEnum { * *
      * All reads performed during the transaction observe a consistent snapshot
-     * of the database, and the transaction will only successfully commit in the
-     * absence of conflicts between its updates and any concurrent updates that
-     * have occurred since that snapshot. Consequently, in contrast to
+     * of the database, and the transaction is only successfully committed in
+     * the absence of conflicts between its updates and any concurrent updates
+     * that have occurred since that snapshot. Consequently, in contrast to
      * `SERIALIZABLE` transactions, only write-write conflicts are detected in
      * snapshot transactions.
      *
@@ -486,9 +163,9 @@ public enum IsolationLevel implements com.google.protobuf.ProtocolMessageEnum {
      *
      * 
      * All reads performed during the transaction observe a consistent snapshot
-     * of the database, and the transaction will only successfully commit in the
-     * absence of conflicts between its updates and any concurrent updates that
-     * have occurred since that snapshot. Consequently, in contrast to
+     * of the database, and the transaction is only successfully committed in
+     * the absence of conflicts between its updates and any concurrent updates
+     * that have occurred since that snapshot. Consequently, in contrast to
      * `SERIALIZABLE` transactions, only write-write conflicts are detected in
      * snapshot transactions.
      *
@@ -624,8 +301,6 @@ public interface ReadWriteOrBuilder
      * Optional. Clients should pass the transaction ID of the previous
      * transaction attempt that was aborted if this transaction is being
      * executed on a multiplexed session.
-     * This feature is not yet supported and will result in an UNIMPLEMENTED
-     * error.
      * 
* * @@ -701,17 +376,18 @@ public enum ReadLockMode implements com.google.protobuf.ProtocolMessageEnum { *
        * Default value.
        *
-       * * If isolation level is `REPEATABLE_READ`, then it is an error to
-       *   specify `read_lock_mode`. Locking semantics default to `OPTIMISTIC`.
-       *   No validation checks are done for reads, except for:
+       * * If isolation level is
+       *   [REPEATABLE_READ][google.spanner.v1.TransactionOptions.IsolationLevel.REPEATABLE_READ],
+       *   then it is an error to specify `read_lock_mode`. Locking semantics
+       *   default to `OPTIMISTIC`. No validation checks are done for reads,
+       *   except to validate that the data that was served at the snapshot time
+       *   is unchanged at commit time in the following cases:
        *     1. reads done as part of queries that use `SELECT FOR UPDATE`
        *     2. reads done as part of statements with a `LOCK_SCANNED_RANGES`
        *        hint
        *     3. reads done as part of DML statements
-       *   to validate that the data that was served at the snapshot time is
-       *   unchanged at commit time.
        * * At all other isolation levels, if `read_lock_mode` is the default
-       *   value, then pessimistic read lock is used.
+       *   value, then pessimistic read locks are used.
        * 
* * READ_LOCK_MODE_UNSPECIFIED = 0; @@ -724,7 +400,9 @@ public enum ReadLockMode implements com.google.protobuf.ProtocolMessageEnum { * Pessimistic lock mode. * * Read locks are acquired immediately on read. - * Semantics described only applies to `SERIALIZABLE` isolation. + * Semantics described only applies to + * [SERIALIZABLE][google.spanner.v1.TransactionOptions.IsolationLevel.SERIALIZABLE] + * isolation. *
* * PESSIMISTIC = 1; @@ -739,7 +417,9 @@ public enum ReadLockMode implements com.google.protobuf.ProtocolMessageEnum { * Locks for reads within the transaction are not acquired on read. * Instead the locks are acquired on a commit to validate that * read/queried data has not changed since the transaction started. - * Semantics described only applies to `SERIALIZABLE` isolation. + * Semantics described only applies to + * [SERIALIZABLE][google.spanner.v1.TransactionOptions.IsolationLevel.SERIALIZABLE] + * isolation. * * * OPTIMISTIC = 2; @@ -754,17 +434,18 @@ public enum ReadLockMode implements com.google.protobuf.ProtocolMessageEnum { *
        * Default value.
        *
-       * * If isolation level is `REPEATABLE_READ`, then it is an error to
-       *   specify `read_lock_mode`. Locking semantics default to `OPTIMISTIC`.
-       *   No validation checks are done for reads, except for:
+       * * If isolation level is
+       *   [REPEATABLE_READ][google.spanner.v1.TransactionOptions.IsolationLevel.REPEATABLE_READ],
+       *   then it is an error to specify `read_lock_mode`. Locking semantics
+       *   default to `OPTIMISTIC`. No validation checks are done for reads,
+       *   except to validate that the data that was served at the snapshot time
+       *   is unchanged at commit time in the following cases:
        *     1. reads done as part of queries that use `SELECT FOR UPDATE`
        *     2. reads done as part of statements with a `LOCK_SCANNED_RANGES`
        *        hint
        *     3. reads done as part of DML statements
-       *   to validate that the data that was served at the snapshot time is
-       *   unchanged at commit time.
        * * At all other isolation levels, if `read_lock_mode` is the default
-       *   value, then pessimistic read lock is used.
+       *   value, then pessimistic read locks are used.
        * 
* * READ_LOCK_MODE_UNSPECIFIED = 0; @@ -778,7 +459,9 @@ public enum ReadLockMode implements com.google.protobuf.ProtocolMessageEnum { * Pessimistic lock mode. * * Read locks are acquired immediately on read. - * Semantics described only applies to `SERIALIZABLE` isolation. + * Semantics described only applies to + * [SERIALIZABLE][google.spanner.v1.TransactionOptions.IsolationLevel.SERIALIZABLE] + * isolation. * * * PESSIMISTIC = 1; @@ -794,7 +477,9 @@ public enum ReadLockMode implements com.google.protobuf.ProtocolMessageEnum { * Locks for reads within the transaction are not acquired on read. * Instead the locks are acquired on a commit to validate that * read/queried data has not changed since the transaction started. - * Semantics described only applies to `SERIALIZABLE` isolation. + * Semantics described only applies to + * [SERIALIZABLE][google.spanner.v1.TransactionOptions.IsolationLevel.SERIALIZABLE] + * isolation. * * * OPTIMISTIC = 2; @@ -936,8 +621,6 @@ public com.google.spanner.v1.TransactionOptions.ReadWrite.ReadLockMode getReadLo * Optional. Clients should pass the transaction ID of the previous * transaction attempt that was aborted if this transaction is being * executed on a multiplexed session. - * This feature is not yet supported and will result in an UNIMPLEMENTED - * error. * * * @@ -1445,8 +1128,6 @@ public Builder clearReadLockMode() { * Optional. Clients should pass the transaction ID of the previous * transaction attempt that was aborted if this transaction is being * executed on a multiplexed session. - * This feature is not yet supported and will result in an UNIMPLEMENTED - * error. * * * @@ -1467,8 +1148,6 @@ public com.google.protobuf.ByteString getMultiplexedSessionPreviousTransactionId * Optional. Clients should pass the transaction ID of the previous * transaction attempt that was aborted if this transaction is being * executed on a multiplexed session. 
- * This feature is not yet supported and will result in an UNIMPLEMENTED - * error. * * * @@ -1496,8 +1175,6 @@ public Builder setMultiplexedSessionPreviousTransactionId( * Optional. Clients should pass the transaction ID of the previous * transaction attempt that was aborted if this transaction is being * executed on a multiplexed session. - * This feature is not yet supported and will result in an UNIMPLEMENTED - * error. * * * @@ -2181,7 +1858,7 @@ public interface ReadOnlyOrBuilder * Executes all reads at the given timestamp. Unlike other modes, * reads at a specific timestamp are repeatable; the same read at * the same timestamp always returns the same data. If the - * timestamp is in the future, the read will block until the + * timestamp is in the future, the read is blocked until the * specified timestamp, modulo the read's deadline. * * Useful for large scale consistent reads such as mapreduces, or @@ -2205,7 +1882,7 @@ public interface ReadOnlyOrBuilder * Executes all reads at the given timestamp. Unlike other modes, * reads at a specific timestamp are repeatable; the same read at * the same timestamp always returns the same data. If the - * timestamp is in the future, the read will block until the + * timestamp is in the future, the read is blocked until the * specified timestamp, modulo the read's deadline. * * Useful for large scale consistent reads such as mapreduces, or @@ -2229,7 +1906,7 @@ public interface ReadOnlyOrBuilder * Executes all reads at the given timestamp. Unlike other modes, * reads at a specific timestamp are repeatable; the same read at * the same timestamp always returns the same data. If the - * timestamp is in the future, the read will block until the + * timestamp is in the future, the read is blocked until the * specified timestamp, modulo the read's deadline. 
* * Useful for large scale consistent reads such as mapreduces, or @@ -2650,7 +2327,7 @@ public com.google.protobuf.DurationOrBuilder getMaxStalenessOrBuilder() { * Executes all reads at the given timestamp. Unlike other modes, * reads at a specific timestamp are repeatable; the same read at * the same timestamp always returns the same data. If the - * timestamp is in the future, the read will block until the + * timestamp is in the future, the read is blocked until the * specified timestamp, modulo the read's deadline. * * Useful for large scale consistent reads such as mapreduces, or @@ -2677,7 +2354,7 @@ public boolean hasReadTimestamp() { * Executes all reads at the given timestamp. Unlike other modes, * reads at a specific timestamp are repeatable; the same read at * the same timestamp always returns the same data. If the - * timestamp is in the future, the read will block until the + * timestamp is in the future, the read is blocked until the * specified timestamp, modulo the read's deadline. * * Useful for large scale consistent reads such as mapreduces, or @@ -2707,7 +2384,7 @@ public com.google.protobuf.Timestamp getReadTimestamp() { * Executes all reads at the given timestamp. Unlike other modes, * reads at a specific timestamp are repeatable; the same read at * the same timestamp always returns the same data. If the - * timestamp is in the future, the read will block until the + * timestamp is in the future, the read is blocked until the * specified timestamp, modulo the read's deadline. * * Useful for large scale consistent reads such as mapreduces, or @@ -4084,7 +3761,7 @@ public com.google.protobuf.DurationOrBuilder getMaxStalenessOrBuilder() { * Executes all reads at the given timestamp. Unlike other modes, * reads at a specific timestamp are repeatable; the same read at * the same timestamp always returns the same data. 
If the - * timestamp is in the future, the read will block until the + * timestamp is in the future, the read is blocked until the * specified timestamp, modulo the read's deadline. * * Useful for large scale consistent reads such as mapreduces, or @@ -4111,7 +3788,7 @@ public boolean hasReadTimestamp() { * Executes all reads at the given timestamp. Unlike other modes, * reads at a specific timestamp are repeatable; the same read at * the same timestamp always returns the same data. If the - * timestamp is in the future, the read will block until the + * timestamp is in the future, the read is blocked until the * specified timestamp, modulo the read's deadline. * * Useful for large scale consistent reads such as mapreduces, or @@ -4148,7 +3825,7 @@ public com.google.protobuf.Timestamp getReadTimestamp() { * Executes all reads at the given timestamp. Unlike other modes, * reads at a specific timestamp are repeatable; the same read at * the same timestamp always returns the same data. If the - * timestamp is in the future, the read will block until the + * timestamp is in the future, the read is blocked until the * specified timestamp, modulo the read's deadline. * * Useful for large scale consistent reads such as mapreduces, or @@ -4182,7 +3859,7 @@ public Builder setReadTimestamp(com.google.protobuf.Timestamp value) { * Executes all reads at the given timestamp. Unlike other modes, * reads at a specific timestamp are repeatable; the same read at * the same timestamp always returns the same data. If the - * timestamp is in the future, the read will block until the + * timestamp is in the future, the read is blocked until the * specified timestamp, modulo the read's deadline. * * Useful for large scale consistent reads such as mapreduces, or @@ -4213,7 +3890,7 @@ public Builder setReadTimestamp(com.google.protobuf.Timestamp.Builder builderFor * Executes all reads at the given timestamp. 
Unlike other modes, * reads at a specific timestamp are repeatable; the same read at * the same timestamp always returns the same data. If the - * timestamp is in the future, the read will block until the + * timestamp is in the future, the read is blocked until the * specified timestamp, modulo the read's deadline. * * Useful for large scale consistent reads such as mapreduces, or @@ -4257,7 +3934,7 @@ public Builder mergeReadTimestamp(com.google.protobuf.Timestamp value) { * Executes all reads at the given timestamp. Unlike other modes, * reads at a specific timestamp are repeatable; the same read at * the same timestamp always returns the same data. If the - * timestamp is in the future, the read will block until the + * timestamp is in the future, the read is blocked until the * specified timestamp, modulo the read's deadline. * * Useful for large scale consistent reads such as mapreduces, or @@ -4294,7 +3971,7 @@ public Builder clearReadTimestamp() { * Executes all reads at the given timestamp. Unlike other modes, * reads at a specific timestamp are repeatable; the same read at * the same timestamp always returns the same data. If the - * timestamp is in the future, the read will block until the + * timestamp is in the future, the read is blocked until the * specified timestamp, modulo the read's deadline. * * Useful for large scale consistent reads such as mapreduces, or @@ -4318,7 +3995,7 @@ public com.google.protobuf.Timestamp.Builder getReadTimestampBuilder() { * Executes all reads at the given timestamp. Unlike other modes, * reads at a specific timestamp are repeatable; the same read at * the same timestamp always returns the same data. If the - * timestamp is in the future, the read will block until the + * timestamp is in the future, the read is blocked until the * specified timestamp, modulo the read's deadline. 
* * Useful for large scale consistent reads such as mapreduces, or @@ -4350,7 +4027,7 @@ public com.google.protobuf.TimestampOrBuilder getReadTimestampOrBuilder() { * Executes all reads at the given timestamp. Unlike other modes, * reads at a specific timestamp are repeatable; the same read at * the same timestamp always returns the same data. If the - * timestamp is in the future, the read will block until the + * timestamp is in the future, the read is blocked until the * specified timestamp, modulo the read's deadline. * * Useful for large scale consistent reads such as mapreduces, or @@ -5012,7 +4689,7 @@ public com.google.spanner.v1.TransactionOptions.PartitionedDml getPartitionedDml * * *
-   * Transaction will not write.
+   * Transaction does not write.
    *
    * Authorization to begin a read-only transaction requires
    * `spanner.databases.beginReadOnlyTransaction` permission
@@ -5032,7 +4709,7 @@ public boolean hasReadOnly() {
    *
    *
    * 
-   * Transaction will not write.
+   * Transaction does not write.
    *
    * Authorization to begin a read-only transaction requires
    * `spanner.databases.beginReadOnlyTransaction` permission
@@ -5055,7 +4732,7 @@ public com.google.spanner.v1.TransactionOptions.ReadOnly getReadOnly() {
    *
    *
    * 
-   * Transaction will not write.
+   * Transaction does not write.
    *
    * Authorization to begin a read-only transaction requires
    * `spanner.databases.beginReadOnlyTransaction` permission
@@ -5079,20 +4756,24 @@ public com.google.spanner.v1.TransactionOptions.ReadOnlyOrBuilder getReadOnlyOrB
    *
    *
    * 
-   * When `exclude_txn_from_change_streams` is set to `true`:
-   *  * Mutations from this transaction will not be recorded in change streams
-   *  with DDL option `allow_txn_exclusion=true` that are tracking columns
-   *  modified by these transactions.
-   *  * Mutations from this transaction will be recorded in change streams with
-   *  DDL option `allow_txn_exclusion=false or not set` that are tracking
-   *  columns modified by these transactions.
+   * When `exclude_txn_from_change_streams` is set to `true`, it prevents read
+   * or write transactions from being tracked in change streams.
+   *
+   * * If the DDL option `allow_txn_exclusion` is set to `true`, then the
+   * updates
+   *  made within this transaction aren't recorded in the change stream.
+   *
+   * * If you don't set the DDL option `allow_txn_exclusion` or if it's
+   *  set to `false`, then the updates made within this transaction are
+   *  recorded in the change stream.
    *
    * When `exclude_txn_from_change_streams` is set to `false` or not set,
-   * mutations from this transaction will be recorded in all change streams that
-   * are tracking columns modified by these transactions.
-   * `exclude_txn_from_change_streams` may only be specified for read-write or
-   * partitioned-dml transactions, otherwise the API will return an
-   * `INVALID_ARGUMENT` error.
+   * modifications from this transaction are recorded in all change streams
+   * that are tracking columns modified by these transactions.
+   *
+   * The `exclude_txn_from_change_streams` option can only be specified
+   * for read-write or partitioned DML transactions, otherwise the API returns
+   * an `INVALID_ARGUMENT` error.
    * 
* * bool exclude_txn_from_change_streams = 5; @@ -5374,330 +5055,7 @@ protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.Build * * *
-   * Transactions:
-   *
-   * Each session can have at most one active transaction at a time (note that
-   * standalone reads and queries use a transaction internally and do count
-   * towards the one transaction limit). After the active transaction is
-   * completed, the session can immediately be re-used for the next transaction.
-   * It is not necessary to create a new session for each transaction.
-   *
-   * Transaction modes:
-   *
-   * Cloud Spanner supports three transaction modes:
-   *
-   *   1. Locking read-write. This type of transaction is the only way
-   *      to write data into Cloud Spanner. These transactions rely on
-   *      pessimistic locking and, if necessary, two-phase commit.
-   *      Locking read-write transactions may abort, requiring the
-   *      application to retry.
-   *
-   *   2. Snapshot read-only. Snapshot read-only transactions provide guaranteed
-   *      consistency across several reads, but do not allow
-   *      writes. Snapshot read-only transactions can be configured to read at
-   *      timestamps in the past, or configured to perform a strong read
-   *      (where Spanner will select a timestamp such that the read is
-   *      guaranteed to see the effects of all transactions that have committed
-   *      before the start of the read). Snapshot read-only transactions do not
-   *      need to be committed.
-   *
-   *      Queries on change streams must be performed with the snapshot read-only
-   *      transaction mode, specifying a strong read. Please see
-   *      [TransactionOptions.ReadOnly.strong][google.spanner.v1.TransactionOptions.ReadOnly.strong]
-   *      for more details.
-   *
-   *   3. Partitioned DML. This type of transaction is used to execute
-   *      a single Partitioned DML statement. Partitioned DML partitions
-   *      the key space and runs the DML statement over each partition
-   *      in parallel using separate, internal transactions that commit
-   *      independently. Partitioned DML transactions do not need to be
-   *      committed.
-   *
-   * For transactions that only read, snapshot read-only transactions
-   * provide simpler semantics and are almost always faster. In
-   * particular, read-only transactions do not take locks, so they do
-   * not conflict with read-write transactions. As a consequence of not
-   * taking locks, they also do not abort, so retry loops are not needed.
-   *
-   * Transactions may only read-write data in a single database. They
-   * may, however, read-write data in different tables within that
-   * database.
-   *
-   * Locking read-write transactions:
-   *
-   * Locking transactions may be used to atomically read-modify-write
-   * data anywhere in a database. This type of transaction is externally
-   * consistent.
-   *
-   * Clients should attempt to minimize the amount of time a transaction
-   * is active. Faster transactions commit with higher probability
-   * and cause less contention. Cloud Spanner attempts to keep read locks
-   * active as long as the transaction continues to do reads, and the
-   * transaction has not been terminated by
-   * [Commit][google.spanner.v1.Spanner.Commit] or
-   * [Rollback][google.spanner.v1.Spanner.Rollback]. Long periods of
-   * inactivity at the client may cause Cloud Spanner to release a
-   * transaction's locks and abort it.
-   *
-   * Conceptually, a read-write transaction consists of zero or more
-   * reads or SQL statements followed by
-   * [Commit][google.spanner.v1.Spanner.Commit]. At any time before
-   * [Commit][google.spanner.v1.Spanner.Commit], the client can send a
-   * [Rollback][google.spanner.v1.Spanner.Rollback] request to abort the
-   * transaction.
-   *
-   * Semantics:
-   *
-   * Cloud Spanner can commit the transaction if all read locks it acquired
-   * are still valid at commit time, and it is able to acquire write
-   * locks for all writes. Cloud Spanner can abort the transaction for any
-   * reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees
-   * that the transaction has not modified any user data in Cloud Spanner.
-   *
-   * Unless the transaction commits, Cloud Spanner makes no guarantees about
-   * how long the transaction's locks were held for. It is an error to
-   * use Cloud Spanner locks for any sort of mutual exclusion other than
-   * between Cloud Spanner transactions themselves.
-   *
-   * Retrying aborted transactions:
-   *
-   * When a transaction aborts, the application can choose to retry the
-   * whole transaction again. To maximize the chances of successfully
-   * committing the retry, the client should execute the retry in the
-   * same session as the original attempt. The original session's lock
-   * priority increases with each consecutive abort, meaning that each
-   * attempt has a slightly better chance of success than the previous.
-   *
-   * Under some circumstances (for example, many transactions attempting to
-   * modify the same row(s)), a transaction can abort many times in a
-   * short period before successfully committing. Thus, it is not a good
-   * idea to cap the number of retries a transaction can attempt;
-   * instead, it is better to limit the total amount of time spent
-   * retrying.
-   *
-   * Idle transactions:
-   *
-   * A transaction is considered idle if it has no outstanding reads or
-   * SQL queries and has not started a read or SQL query within the last 10
-   * seconds. Idle transactions can be aborted by Cloud Spanner so that they
-   * don't hold on to locks indefinitely. If an idle transaction is aborted, the
-   * commit will fail with error `ABORTED`.
-   *
-   * If this behavior is undesirable, periodically executing a simple
-   * SQL query in the transaction (for example, `SELECT 1`) prevents the
-   * transaction from becoming idle.
-   *
-   * Snapshot read-only transactions:
-   *
-   * Snapshot read-only transactions provides a simpler method than
-   * locking read-write transactions for doing several consistent
-   * reads. However, this type of transaction does not support writes.
-   *
-   * Snapshot transactions do not take locks. Instead, they work by
-   * choosing a Cloud Spanner timestamp, then executing all reads at that
-   * timestamp. Since they do not acquire locks, they do not block
-   * concurrent read-write transactions.
-   *
-   * Unlike locking read-write transactions, snapshot read-only
-   * transactions never abort. They can fail if the chosen read
-   * timestamp is garbage collected; however, the default garbage
-   * collection policy is generous enough that most applications do not
-   * need to worry about this in practice.
-   *
-   * Snapshot read-only transactions do not need to call
-   * [Commit][google.spanner.v1.Spanner.Commit] or
-   * [Rollback][google.spanner.v1.Spanner.Rollback] (and in fact are not
-   * permitted to do so).
-   *
-   * To execute a snapshot transaction, the client specifies a timestamp
-   * bound, which tells Cloud Spanner how to choose a read timestamp.
-   *
-   * The types of timestamp bound are:
-   *
-   *   - Strong (the default).
-   *   - Bounded staleness.
-   *   - Exact staleness.
-   *
-   * If the Cloud Spanner database to be read is geographically distributed,
-   * stale read-only transactions can execute more quickly than strong
-   * or read-write transactions, because they are able to execute far
-   * from the leader replica.
-   *
-   * Each type of timestamp bound is discussed in detail below.
-   *
-   * Strong: Strong reads are guaranteed to see the effects of all transactions
-   * that have committed before the start of the read. Furthermore, all
-   * rows yielded by a single read are consistent with each other -- if
-   * any part of the read observes a transaction, all parts of the read
-   * see the transaction.
-   *
-   * Strong reads are not repeatable: two consecutive strong read-only
-   * transactions might return inconsistent results if there are
-   * concurrent writes. If consistency across reads is required, the
-   * reads should be executed within a transaction or at an exact read
-   * timestamp.
-   *
-   * Queries on change streams (see below for more details) must also specify
-   * the strong read timestamp bound.
-   *
-   * See
-   * [TransactionOptions.ReadOnly.strong][google.spanner.v1.TransactionOptions.ReadOnly.strong].
-   *
-   * Exact staleness:
-   *
-   * These timestamp bounds execute reads at a user-specified
-   * timestamp. Reads at a timestamp are guaranteed to see a consistent
-   * prefix of the global transaction history: they observe
-   * modifications done by all transactions with a commit timestamp less than or
-   * equal to the read timestamp, and observe none of the modifications done by
-   * transactions with a larger commit timestamp. They will block until
-   * all conflicting transactions that may be assigned commit timestamps
-   * <= the read timestamp have finished.
-   *
-   * The timestamp can either be expressed as an absolute Cloud Spanner commit
-   * timestamp or a staleness relative to the current time.
-   *
-   * These modes do not require a "negotiation phase" to pick a
-   * timestamp. As a result, they execute slightly faster than the
-   * equivalent boundedly stale concurrency modes. On the other hand,
-   * boundedly stale reads usually return fresher results.
-   *
-   * See
-   * [TransactionOptions.ReadOnly.read_timestamp][google.spanner.v1.TransactionOptions.ReadOnly.read_timestamp]
-   * and
-   * [TransactionOptions.ReadOnly.exact_staleness][google.spanner.v1.TransactionOptions.ReadOnly.exact_staleness].
-   *
-   * Bounded staleness:
-   *
-   * Bounded staleness modes allow Cloud Spanner to pick the read timestamp,
-   * subject to a user-provided staleness bound. Cloud Spanner chooses the
-   * newest timestamp within the staleness bound that allows execution
-   * of the reads at the closest available replica without blocking.
-   *
-   * All rows yielded are consistent with each other -- if any part of
-   * the read observes a transaction, all parts of the read see the
-   * transaction. Boundedly stale reads are not repeatable: two stale
-   * reads, even if they use the same staleness bound, can execute at
-   * different timestamps and thus return inconsistent results.
-   *
-   * Boundedly stale reads execute in two phases: the first phase
-   * negotiates a timestamp among all replicas needed to serve the
-   * read. In the second phase, reads are executed at the negotiated
-   * timestamp.
-   *
-   * As a result of the two phase execution, bounded staleness reads are
-   * usually a little slower than comparable exact staleness
-   * reads. However, they are typically able to return fresher
-   * results, and are more likely to execute at the closest replica.
-   *
-   * Because the timestamp negotiation requires up-front knowledge of
-   * which rows will be read, it can only be used with single-use
-   * read-only transactions.
-   *
-   * See
-   * [TransactionOptions.ReadOnly.max_staleness][google.spanner.v1.TransactionOptions.ReadOnly.max_staleness]
-   * and
-   * [TransactionOptions.ReadOnly.min_read_timestamp][google.spanner.v1.TransactionOptions.ReadOnly.min_read_timestamp].
-   *
-   * Old read timestamps and garbage collection:
-   *
-   * Cloud Spanner continuously garbage collects deleted and overwritten data
-   * in the background to reclaim storage space. This process is known
-   * as "version GC". By default, version GC reclaims versions after they
-   * are one hour old. Because of this, Cloud Spanner cannot perform reads
-   * at read timestamps more than one hour in the past. This
-   * restriction also applies to in-progress reads and/or SQL queries whose
-   * timestamp become too old while executing. Reads and SQL queries with
-   * too-old read timestamps fail with the error `FAILED_PRECONDITION`.
-   *
-   * You can configure and extend the `VERSION_RETENTION_PERIOD` of a
-   * database up to a period as long as one week, which allows Cloud Spanner
-   * to perform reads up to one week in the past.
-   *
-   * Querying change Streams:
-   *
-   * A Change Stream is a schema object that can be configured to watch data
-   * changes on the entire database, a set of tables, or a set of columns
-   * in a database.
-   *
-   * When a change stream is created, Spanner automatically defines a
-   * corresponding SQL Table-Valued Function (TVF) that can be used to query
-   * the change records in the associated change stream using the
-   * ExecuteStreamingSql API. The name of the TVF for a change stream is
-   * generated from the name of the change stream: READ_<change_stream_name>.
-   *
-   * All queries on change stream TVFs must be executed using the
-   * ExecuteStreamingSql API with a single-use read-only transaction with a
-   * strong read-only timestamp_bound. The change stream TVF allows users to
-   * specify the start_timestamp and end_timestamp for the time range of
-   * interest. All change records within the retention period is accessible
-   * using the strong read-only timestamp_bound. All other TransactionOptions
-   * are invalid for change stream queries.
-   *
-   * In addition, if TransactionOptions.read_only.return_read_timestamp is set
-   * to true, a special value of 2^63 - 2 will be returned in the
-   * [Transaction][google.spanner.v1.Transaction] message that describes the
-   * transaction, instead of a valid read timestamp. This special value should be
-   * discarded and not used for any subsequent queries.
-   *
-   * Please see https://cloud.google.com/spanner/docs/change-streams
-   * for more details on how to query the change stream TVFs.
-   *
-   * Partitioned DML transactions:
-   *
-   * Partitioned DML transactions are used to execute DML statements with a
-   * different execution strategy that provides different, and often better,
-   * scalability properties for large, table-wide operations than DML in a
-   * ReadWrite transaction. Smaller scoped statements, such as an OLTP workload,
-   * should prefer using ReadWrite transactions.
-   *
-   * Partitioned DML partitions the keyspace and runs the DML statement on each
-   * partition in separate, internal transactions. These transactions commit
-   * automatically when complete, and run independently from one another.
-   *
-   * To reduce lock contention, this execution strategy only acquires read locks
-   * on rows that match the WHERE clause of the statement. Additionally, the
-   * smaller per-partition transactions hold locks for less time.
-   *
-   * That said, Partitioned DML is not a drop-in replacement for standard DML used
-   * in ReadWrite transactions.
-   *
-   *  - The DML statement must be fully-partitionable. Specifically, the statement
-   *    must be expressible as the union of many statements which each access only
-   *    a single row of the table.
-   *
-   *  - The statement is not applied atomically to all rows of the table. Rather,
-   *    the statement is applied atomically to partitions of the table, in
-   *    independent transactions. Secondary index rows are updated atomically
-   *    with the base table rows.
-   *
-   *  - Partitioned DML does not guarantee exactly-once execution semantics
-   *    against a partition. The statement will be applied at least once to each
-   *    partition. It is strongly recommended that the DML statement should be
-   *    idempotent to avoid unexpected results. For instance, it is potentially
-   *    dangerous to run a statement such as
-   *    `UPDATE table SET column = column + 1` as it could be run multiple times
-   *    against some rows.
-   *
-   *  - The partitions are committed automatically - there is no support for
-   *    Commit or Rollback. If the call returns an error, or if the client issuing
-   *    the ExecuteSql call dies, it is possible that some rows had the statement
-   *    executed on them successfully. It is also possible that statement was
-   *    never executed against other rows.
-   *
-   *  - Partitioned DML transactions may only contain the execution of a single
-   *    DML statement via ExecuteSql or ExecuteStreamingSql.
-   *
-   *  - If any error is encountered during the execution of the partitioned DML
-   *    operation (for instance, a UNIQUE INDEX violation, division by zero, or a
-   *    value that cannot be stored due to schema constraints), then the
-   *    operation is stopped at that point and an error is returned. It is
-   *    possible that at this point, some partitions have been committed (or even
-   *    committed multiple times), and other partitions have not been run at all.
-   *
-   * Given the above, Partitioned DML is good fit for large, database-wide,
-   * operations that are idempotent, such as deleting old rows from a very large
-   * table.
+   * Options to use for transactions.
    * 
* * Protobuf type {@code google.spanner.v1.TransactionOptions} @@ -6488,7 +5846,7 @@ public Builder clearPartitionedDml() { * * *
-     * Transaction will not write.
+     * Transaction does not write.
      *
      * Authorization to begin a read-only transaction requires
      * `spanner.databases.beginReadOnlyTransaction` permission
@@ -6508,7 +5866,7 @@ public boolean hasReadOnly() {
      *
      *
      * 
-     * Transaction will not write.
+     * Transaction does not write.
      *
      * Authorization to begin a read-only transaction requires
      * `spanner.databases.beginReadOnlyTransaction` permission
@@ -6538,7 +5896,7 @@ public com.google.spanner.v1.TransactionOptions.ReadOnly getReadOnly() {
      *
      *
      * 
-     * Transaction will not write.
+     * Transaction does not write.
      *
      * Authorization to begin a read-only transaction requires
      * `spanner.databases.beginReadOnlyTransaction` permission
@@ -6565,7 +5923,7 @@ public Builder setReadOnly(com.google.spanner.v1.TransactionOptions.ReadOnly val
      *
      *
      * 
-     * Transaction will not write.
+     * Transaction does not write.
      *
      * Authorization to begin a read-only transaction requires
      * `spanner.databases.beginReadOnlyTransaction` permission
@@ -6590,7 +5948,7 @@ public Builder setReadOnly(
      *
      *
      * 
-     * Transaction will not write.
+     * Transaction does not write.
      *
      * Authorization to begin a read-only transaction requires
      * `spanner.databases.beginReadOnlyTransaction` permission
@@ -6627,7 +5985,7 @@ public Builder mergeReadOnly(com.google.spanner.v1.TransactionOptions.ReadOnly v
      *
      *
      * 
-     * Transaction will not write.
+     * Transaction does not write.
      *
      * Authorization to begin a read-only transaction requires
      * `spanner.databases.beginReadOnlyTransaction` permission
@@ -6657,7 +6015,7 @@ public Builder clearReadOnly() {
      *
      *
      * 
-     * Transaction will not write.
+     * Transaction does not write.
      *
      * Authorization to begin a read-only transaction requires
      * `spanner.databases.beginReadOnlyTransaction` permission
@@ -6674,7 +6032,7 @@ public com.google.spanner.v1.TransactionOptions.ReadOnly.Builder getReadOnlyBuil
      *
      *
      * 
-     * Transaction will not write.
+     * Transaction does not write.
      *
      * Authorization to begin a read-only transaction requires
      * `spanner.databases.beginReadOnlyTransaction` permission
@@ -6699,7 +6057,7 @@ public com.google.spanner.v1.TransactionOptions.ReadOnlyOrBuilder getReadOnlyOrB
      *
      *
      * 
-     * Transaction will not write.
+     * Transaction does not write.
      *
      * Authorization to begin a read-only transaction requires
      * `spanner.databases.beginReadOnlyTransaction` permission
@@ -6738,20 +6096,24 @@ public com.google.spanner.v1.TransactionOptions.ReadOnlyOrBuilder getReadOnlyOrB
      *
      *
      * 
-     * When `exclude_txn_from_change_streams` is set to `true`:
-     *  * Mutations from this transaction will not be recorded in change streams
-     *  with DDL option `allow_txn_exclusion=true` that are tracking columns
-     *  modified by these transactions.
-     *  * Mutations from this transaction will be recorded in change streams with
-     *  DDL option `allow_txn_exclusion=false or not set` that are tracking
-     *  columns modified by these transactions.
+     * When `exclude_txn_from_change_streams` is set to `true`, it prevents read
+     * or write transactions from being tracked in change streams.
+     *
+     * * If the DDL option `allow_txn_exclusion` is set to `true`, then
+     *  the updates made within this transaction aren't recorded in the
+     *  change stream.
+     *
+     * * If you don't set the DDL option `allow_txn_exclusion` or if it's
+     *  set to `false`, then the updates made within this transaction are
+     *  recorded in the change stream.
      *
      * When `exclude_txn_from_change_streams` is set to `false` or not set,
-     * mutations from this transaction will be recorded in all change streams that
-     * are tracking columns modified by these transactions.
-     * `exclude_txn_from_change_streams` may only be specified for read-write or
-     * partitioned-dml transactions, otherwise the API will return an
-     * `INVALID_ARGUMENT` error.
+     * modifications from this transaction are recorded in all change streams
+     * that are tracking columns modified by these transactions.
+     *
+     * The `exclude_txn_from_change_streams` option can only be specified
+     * for read-write or partitioned DML transactions, otherwise the API returns
+     * an `INVALID_ARGUMENT` error.
      * 
* * bool exclude_txn_from_change_streams = 5; @@ -6767,20 +6129,24 @@ public boolean getExcludeTxnFromChangeStreams() { * * *
-     * When `exclude_txn_from_change_streams` is set to `true`:
-     *  * Mutations from this transaction will not be recorded in change streams
-     *  with DDL option `allow_txn_exclusion=true` that are tracking columns
-     *  modified by these transactions.
-     *  * Mutations from this transaction will be recorded in change streams with
-     *  DDL option `allow_txn_exclusion=false or not set` that are tracking
-     *  columns modified by these transactions.
+     * When `exclude_txn_from_change_streams` is set to `true`, it prevents read
+     * or write transactions from being tracked in change streams.
+     *
+     * * If the DDL option `allow_txn_exclusion` is set to `true`, then
+     *  the updates made within this transaction aren't recorded in the
+     *  change stream.
+     *
+     * * If you don't set the DDL option `allow_txn_exclusion` or if it's
+     *  set to `false`, then the updates made within this transaction are
+     *  recorded in the change stream.
      *
      * When `exclude_txn_from_change_streams` is set to `false` or not set,
-     * mutations from this transaction will be recorded in all change streams that
-     * are tracking columns modified by these transactions.
-     * `exclude_txn_from_change_streams` may only be specified for read-write or
-     * partitioned-dml transactions, otherwise the API will return an
-     * `INVALID_ARGUMENT` error.
+     * modifications from this transaction are recorded in all change streams
+     * that are tracking columns modified by these transactions.
+     *
+     * The `exclude_txn_from_change_streams` option can only be specified
+     * for read-write or partitioned DML transactions, otherwise the API returns
+     * an `INVALID_ARGUMENT` error.
      * 
* * bool exclude_txn_from_change_streams = 5; @@ -6800,20 +6166,24 @@ public Builder setExcludeTxnFromChangeStreams(boolean value) { * * *
-     * When `exclude_txn_from_change_streams` is set to `true`:
-     *  * Mutations from this transaction will not be recorded in change streams
-     *  with DDL option `allow_txn_exclusion=true` that are tracking columns
-     *  modified by these transactions.
-     *  * Mutations from this transaction will be recorded in change streams with
-     *  DDL option `allow_txn_exclusion=false or not set` that are tracking
-     *  columns modified by these transactions.
+     * When `exclude_txn_from_change_streams` is set to `true`, it prevents read
+     * or write transactions from being tracked in change streams.
+     *
+     * * If the DDL option `allow_txn_exclusion` is set to `true`, then
+     *  the updates made within this transaction aren't recorded in the
+     *  change stream.
+     *
+     * * If you don't set the DDL option `allow_txn_exclusion` or if it's
+     *  set to `false`, then the updates made within this transaction are
+     *  recorded in the change stream.
      *
      * When `exclude_txn_from_change_streams` is set to `false` or not set,
-     * mutations from this transaction will be recorded in all change streams that
-     * are tracking columns modified by these transactions.
-     * `exclude_txn_from_change_streams` may only be specified for read-write or
-     * partitioned-dml transactions, otherwise the API will return an
-     * `INVALID_ARGUMENT` error.
+     * modifications from this transaction are recorded in all change streams
+     * that are tracking columns modified by these transactions.
+     *
+     * The `exclude_txn_from_change_streams` option can only be specified
+     * for read-write or partitioned DML transactions, otherwise the API returns
+     * an `INVALID_ARGUMENT` error.
      * 
* * bool exclude_txn_from_change_streams = 5; diff --git a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/TransactionOptionsOrBuilder.java b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/TransactionOptionsOrBuilder.java index 143d0d815ab..6d8ceb61676 100644 --- a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/TransactionOptionsOrBuilder.java +++ b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/TransactionOptionsOrBuilder.java @@ -126,7 +126,7 @@ public interface TransactionOptionsOrBuilder * * *
-   * Transaction will not write.
+   * Transaction does not write.
    *
    * Authorization to begin a read-only transaction requires
    * `spanner.databases.beginReadOnlyTransaction` permission
@@ -143,7 +143,7 @@ public interface TransactionOptionsOrBuilder
    *
    *
    * 
-   * Transaction will not write.
+   * Transaction does not write.
    *
    * Authorization to begin a read-only transaction requires
    * `spanner.databases.beginReadOnlyTransaction` permission
@@ -160,7 +160,7 @@ public interface TransactionOptionsOrBuilder
    *
    *
    * 
-   * Transaction will not write.
+   * Transaction does not write.
    *
    * Authorization to begin a read-only transaction requires
    * `spanner.databases.beginReadOnlyTransaction` permission
@@ -175,20 +175,24 @@ public interface TransactionOptionsOrBuilder
    *
    *
    * 
-   * When `exclude_txn_from_change_streams` is set to `true`:
-   *  * Mutations from this transaction will not be recorded in change streams
-   *  with DDL option `allow_txn_exclusion=true` that are tracking columns
-   *  modified by these transactions.
-   *  * Mutations from this transaction will be recorded in change streams with
-   *  DDL option `allow_txn_exclusion=false or not set` that are tracking
-   *  columns modified by these transactions.
+   * When `exclude_txn_from_change_streams` is set to `true`, it prevents read
+   * or write transactions from being tracked in change streams.
+   *
+   * * If the DDL option `allow_txn_exclusion` is set to `true`, then
+   *  the updates made within this transaction aren't recorded in the
+   *  change stream.
+   *
+   * * If you don't set the DDL option `allow_txn_exclusion` or if it's
+   *  set to `false`, then the updates made within this transaction are
+   *  recorded in the change stream.
    *
    * When `exclude_txn_from_change_streams` is set to `false` or not set,
-   * mutations from this transaction will be recorded in all change streams that
-   * are tracking columns modified by these transactions.
-   * `exclude_txn_from_change_streams` may only be specified for read-write or
-   * partitioned-dml transactions, otherwise the API will return an
-   * `INVALID_ARGUMENT` error.
+   * modifications from this transaction are recorded in all change streams
+   * that are tracking columns modified by these transactions.
+   *
+   * The `exclude_txn_from_change_streams` option can only be specified
+   * for read-write or partitioned DML transactions, otherwise the API returns
+   * an `INVALID_ARGUMENT` error.
    * 
* * bool exclude_txn_from_change_streams = 5; diff --git a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/TransactionOrBuilder.java b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/TransactionOrBuilder.java index b41d1cb47e4..20e82465290 100644 --- a/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/TransactionOrBuilder.java +++ b/proto-google-cloud-spanner-v1/src/main/java/com/google/spanner/v1/TransactionOrBuilder.java @@ -100,15 +100,13 @@ public interface TransactionOrBuilder * * *
-   * A precommit token will be included in the response of a BeginTransaction
+   * A precommit token is included in the response of a BeginTransaction
    * request if the read-write transaction is on a multiplexed session and
    * a mutation_key was specified in the
    * [BeginTransaction][google.spanner.v1.BeginTransactionRequest].
    * The precommit token with the highest sequence number from this transaction
    * attempt should be passed to the [Commit][google.spanner.v1.Spanner.Commit]
    * request for this transaction.
-   * This feature is not yet supported and will result in an UNIMPLEMENTED
-   * error.
    * 
* * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 3; @@ -121,15 +119,13 @@ public interface TransactionOrBuilder * * *
-   * A precommit token will be included in the response of a BeginTransaction
+   * A precommit token is included in the response of a BeginTransaction
    * request if the read-write transaction is on a multiplexed session and
    * a mutation_key was specified in the
    * [BeginTransaction][google.spanner.v1.BeginTransactionRequest].
    * The precommit token with the highest sequence number from this transaction
    * attempt should be passed to the [Commit][google.spanner.v1.Spanner.Commit]
    * request for this transaction.
-   * This feature is not yet supported and will result in an UNIMPLEMENTED
-   * error.
    * 
* * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 3; @@ -142,15 +138,13 @@ public interface TransactionOrBuilder * * *
-   * A precommit token will be included in the response of a BeginTransaction
+   * A precommit token is included in the response of a BeginTransaction
    * request if the read-write transaction is on a multiplexed session and
    * a mutation_key was specified in the
    * [BeginTransaction][google.spanner.v1.BeginTransactionRequest].
    * The precommit token with the highest sequence number from this transaction
    * attempt should be passed to the [Commit][google.spanner.v1.Spanner.Commit]
    * request for this transaction.
-   * This feature is not yet supported and will result in an UNIMPLEMENTED
-   * error.
    * 
* * .google.spanner.v1.MultiplexedSessionPrecommitToken precommit_token = 3; diff --git a/proto-google-cloud-spanner-v1/src/main/proto/google/spanner/v1/commit_response.proto b/proto-google-cloud-spanner-v1/src/main/proto/google/spanner/v1/commit_response.proto index beeb3123e27..b29663501f8 100644 --- a/proto-google-cloud-spanner-v1/src/main/proto/google/spanner/v1/commit_response.proto +++ b/proto-google-cloud-spanner-v1/src/main/proto/google/spanner/v1/commit_response.proto @@ -44,16 +44,21 @@ message CommitResponse { // The Cloud Spanner timestamp at which the transaction committed. google.protobuf.Timestamp commit_timestamp = 1; - // The statistics about this Commit. Not returned by default. + // The statistics about this `Commit`. Not returned by default. // For more information, see // [CommitRequest.return_commit_stats][google.spanner.v1.CommitRequest.return_commit_stats]. CommitStats commit_stats = 2; - // Clients should examine and retry the commit if any of the following - // reasons are populated. + // You must examine and retry the commit if the following is populated. oneof MultiplexedSessionRetry { // If specified, transaction has not committed yet. - // Clients must retry the commit with the new precommit token. + // You must retry the commit with the new precommit token. MultiplexedSessionPrecommitToken precommit_token = 4; } + + // If `TransactionOptions.isolation_level` is set to + // `IsolationLevel.REPEATABLE_READ`, then the snapshot timestamp is the + // timestamp at which all reads in the transaction ran. This timestamp is + // never returned. 
+ google.protobuf.Timestamp snapshot_timestamp = 5; } diff --git a/proto-google-cloud-spanner-v1/src/main/proto/google/spanner/v1/transaction.proto b/proto-google-cloud-spanner-v1/src/main/proto/google/spanner/v1/transaction.proto index 612e491a3a4..81e7649f46e 100644 --- a/proto-google-cloud-spanner-v1/src/main/proto/google/spanner/v1/transaction.proto +++ b/proto-google-cloud-spanner-v1/src/main/proto/google/spanner/v1/transaction.proto @@ -28,330 +28,7 @@ option java_package = "com.google.spanner.v1"; option php_namespace = "Google\\Cloud\\Spanner\\V1"; option ruby_package = "Google::Cloud::Spanner::V1"; -// Transactions: -// -// Each session can have at most one active transaction at a time (note that -// standalone reads and queries use a transaction internally and do count -// towards the one transaction limit). After the active transaction is -// completed, the session can immediately be re-used for the next transaction. -// It is not necessary to create a new session for each transaction. -// -// Transaction modes: -// -// Cloud Spanner supports three transaction modes: -// -// 1. Locking read-write. This type of transaction is the only way -// to write data into Cloud Spanner. These transactions rely on -// pessimistic locking and, if necessary, two-phase commit. -// Locking read-write transactions may abort, requiring the -// application to retry. -// -// 2. Snapshot read-only. Snapshot read-only transactions provide guaranteed -// consistency across several reads, but do not allow -// writes. Snapshot read-only transactions can be configured to read at -// timestamps in the past, or configured to perform a strong read -// (where Spanner will select a timestamp such that the read is -// guaranteed to see the effects of all transactions that have committed -// before the start of the read). Snapshot read-only transactions do not -// need to be committed. 
-// -// Queries on change streams must be performed with the snapshot read-only -// transaction mode, specifying a strong read. Please see -// [TransactionOptions.ReadOnly.strong][google.spanner.v1.TransactionOptions.ReadOnly.strong] -// for more details. -// -// 3. Partitioned DML. This type of transaction is used to execute -// a single Partitioned DML statement. Partitioned DML partitions -// the key space and runs the DML statement over each partition -// in parallel using separate, internal transactions that commit -// independently. Partitioned DML transactions do not need to be -// committed. -// -// For transactions that only read, snapshot read-only transactions -// provide simpler semantics and are almost always faster. In -// particular, read-only transactions do not take locks, so they do -// not conflict with read-write transactions. As a consequence of not -// taking locks, they also do not abort, so retry loops are not needed. -// -// Transactions may only read-write data in a single database. They -// may, however, read-write data in different tables within that -// database. -// -// Locking read-write transactions: -// -// Locking transactions may be used to atomically read-modify-write -// data anywhere in a database. This type of transaction is externally -// consistent. -// -// Clients should attempt to minimize the amount of time a transaction -// is active. Faster transactions commit with higher probability -// and cause less contention. Cloud Spanner attempts to keep read locks -// active as long as the transaction continues to do reads, and the -// transaction has not been terminated by -// [Commit][google.spanner.v1.Spanner.Commit] or -// [Rollback][google.spanner.v1.Spanner.Rollback]. Long periods of -// inactivity at the client may cause Cloud Spanner to release a -// transaction's locks and abort it. 
-// -// Conceptually, a read-write transaction consists of zero or more -// reads or SQL statements followed by -// [Commit][google.spanner.v1.Spanner.Commit]. At any time before -// [Commit][google.spanner.v1.Spanner.Commit], the client can send a -// [Rollback][google.spanner.v1.Spanner.Rollback] request to abort the -// transaction. -// -// Semantics: -// -// Cloud Spanner can commit the transaction if all read locks it acquired -// are still valid at commit time, and it is able to acquire write -// locks for all writes. Cloud Spanner can abort the transaction for any -// reason. If a commit attempt returns `ABORTED`, Cloud Spanner guarantees -// that the transaction has not modified any user data in Cloud Spanner. -// -// Unless the transaction commits, Cloud Spanner makes no guarantees about -// how long the transaction's locks were held for. It is an error to -// use Cloud Spanner locks for any sort of mutual exclusion other than -// between Cloud Spanner transactions themselves. -// -// Retrying aborted transactions: -// -// When a transaction aborts, the application can choose to retry the -// whole transaction again. To maximize the chances of successfully -// committing the retry, the client should execute the retry in the -// same session as the original attempt. The original session's lock -// priority increases with each consecutive abort, meaning that each -// attempt has a slightly better chance of success than the previous. -// -// Under some circumstances (for example, many transactions attempting to -// modify the same row(s)), a transaction can abort many times in a -// short period before successfully committing. Thus, it is not a good -// idea to cap the number of retries a transaction can attempt; -// instead, it is better to limit the total amount of time spent -// retrying. 
-// -// Idle transactions: -// -// A transaction is considered idle if it has no outstanding reads or -// SQL queries and has not started a read or SQL query within the last 10 -// seconds. Idle transactions can be aborted by Cloud Spanner so that they -// don't hold on to locks indefinitely. If an idle transaction is aborted, the -// commit will fail with error `ABORTED`. -// -// If this behavior is undesirable, periodically executing a simple -// SQL query in the transaction (for example, `SELECT 1`) prevents the -// transaction from becoming idle. -// -// Snapshot read-only transactions: -// -// Snapshot read-only transactions provides a simpler method than -// locking read-write transactions for doing several consistent -// reads. However, this type of transaction does not support writes. -// -// Snapshot transactions do not take locks. Instead, they work by -// choosing a Cloud Spanner timestamp, then executing all reads at that -// timestamp. Since they do not acquire locks, they do not block -// concurrent read-write transactions. -// -// Unlike locking read-write transactions, snapshot read-only -// transactions never abort. They can fail if the chosen read -// timestamp is garbage collected; however, the default garbage -// collection policy is generous enough that most applications do not -// need to worry about this in practice. -// -// Snapshot read-only transactions do not need to call -// [Commit][google.spanner.v1.Spanner.Commit] or -// [Rollback][google.spanner.v1.Spanner.Rollback] (and in fact are not -// permitted to do so). -// -// To execute a snapshot transaction, the client specifies a timestamp -// bound, which tells Cloud Spanner how to choose a read timestamp. -// -// The types of timestamp bound are: -// -// - Strong (the default). -// - Bounded staleness. -// - Exact staleness. 
-// -// If the Cloud Spanner database to be read is geographically distributed, -// stale read-only transactions can execute more quickly than strong -// or read-write transactions, because they are able to execute far -// from the leader replica. -// -// Each type of timestamp bound is discussed in detail below. -// -// Strong: Strong reads are guaranteed to see the effects of all transactions -// that have committed before the start of the read. Furthermore, all -// rows yielded by a single read are consistent with each other -- if -// any part of the read observes a transaction, all parts of the read -// see the transaction. -// -// Strong reads are not repeatable: two consecutive strong read-only -// transactions might return inconsistent results if there are -// concurrent writes. If consistency across reads is required, the -// reads should be executed within a transaction or at an exact read -// timestamp. -// -// Queries on change streams (see below for more details) must also specify -// the strong read timestamp bound. -// -// See -// [TransactionOptions.ReadOnly.strong][google.spanner.v1.TransactionOptions.ReadOnly.strong]. -// -// Exact staleness: -// -// These timestamp bounds execute reads at a user-specified -// timestamp. Reads at a timestamp are guaranteed to see a consistent -// prefix of the global transaction history: they observe -// modifications done by all transactions with a commit timestamp less than or -// equal to the read timestamp, and observe none of the modifications done by -// transactions with a larger commit timestamp. They will block until -// all conflicting transactions that may be assigned commit timestamps -// <= the read timestamp have finished. -// -// The timestamp can either be expressed as an absolute Cloud Spanner commit -// timestamp or a staleness relative to the current time. -// -// These modes do not require a "negotiation phase" to pick a -// timestamp. 
As a result, they execute slightly faster than the -// equivalent boundedly stale concurrency modes. On the other hand, -// boundedly stale reads usually return fresher results. -// -// See -// [TransactionOptions.ReadOnly.read_timestamp][google.spanner.v1.TransactionOptions.ReadOnly.read_timestamp] -// and -// [TransactionOptions.ReadOnly.exact_staleness][google.spanner.v1.TransactionOptions.ReadOnly.exact_staleness]. -// -// Bounded staleness: -// -// Bounded staleness modes allow Cloud Spanner to pick the read timestamp, -// subject to a user-provided staleness bound. Cloud Spanner chooses the -// newest timestamp within the staleness bound that allows execution -// of the reads at the closest available replica without blocking. -// -// All rows yielded are consistent with each other -- if any part of -// the read observes a transaction, all parts of the read see the -// transaction. Boundedly stale reads are not repeatable: two stale -// reads, even if they use the same staleness bound, can execute at -// different timestamps and thus return inconsistent results. -// -// Boundedly stale reads execute in two phases: the first phase -// negotiates a timestamp among all replicas needed to serve the -// read. In the second phase, reads are executed at the negotiated -// timestamp. -// -// As a result of the two phase execution, bounded staleness reads are -// usually a little slower than comparable exact staleness -// reads. However, they are typically able to return fresher -// results, and are more likely to execute at the closest replica. -// -// Because the timestamp negotiation requires up-front knowledge of -// which rows will be read, it can only be used with single-use -// read-only transactions. -// -// See -// [TransactionOptions.ReadOnly.max_staleness][google.spanner.v1.TransactionOptions.ReadOnly.max_staleness] -// and -// [TransactionOptions.ReadOnly.min_read_timestamp][google.spanner.v1.TransactionOptions.ReadOnly.min_read_timestamp]. 
-// -// Old read timestamps and garbage collection: -// -// Cloud Spanner continuously garbage collects deleted and overwritten data -// in the background to reclaim storage space. This process is known -// as "version GC". By default, version GC reclaims versions after they -// are one hour old. Because of this, Cloud Spanner cannot perform reads -// at read timestamps more than one hour in the past. This -// restriction also applies to in-progress reads and/or SQL queries whose -// timestamp become too old while executing. Reads and SQL queries with -// too-old read timestamps fail with the error `FAILED_PRECONDITION`. -// -// You can configure and extend the `VERSION_RETENTION_PERIOD` of a -// database up to a period as long as one week, which allows Cloud Spanner -// to perform reads up to one week in the past. -// -// Querying change Streams: -// -// A Change Stream is a schema object that can be configured to watch data -// changes on the entire database, a set of tables, or a set of columns -// in a database. -// -// When a change stream is created, Spanner automatically defines a -// corresponding SQL Table-Valued Function (TVF) that can be used to query -// the change records in the associated change stream using the -// ExecuteStreamingSql API. The name of the TVF for a change stream is -// generated from the name of the change stream: READ_. -// -// All queries on change stream TVFs must be executed using the -// ExecuteStreamingSql API with a single-use read-only transaction with a -// strong read-only timestamp_bound. The change stream TVF allows users to -// specify the start_timestamp and end_timestamp for the time range of -// interest. All change records within the retention period is accessible -// using the strong read-only timestamp_bound. All other TransactionOptions -// are invalid for change stream queries. 
-// -// In addition, if TransactionOptions.read_only.return_read_timestamp is set -// to true, a special value of 2^63 - 2 will be returned in the -// [Transaction][google.spanner.v1.Transaction] message that describes the -// transaction, instead of a valid read timestamp. This special value should be -// discarded and not used for any subsequent queries. -// -// Please see https://cloud.google.com/spanner/docs/change-streams -// for more details on how to query the change stream TVFs. -// -// Partitioned DML transactions: -// -// Partitioned DML transactions are used to execute DML statements with a -// different execution strategy that provides different, and often better, -// scalability properties for large, table-wide operations than DML in a -// ReadWrite transaction. Smaller scoped statements, such as an OLTP workload, -// should prefer using ReadWrite transactions. -// -// Partitioned DML partitions the keyspace and runs the DML statement on each -// partition in separate, internal transactions. These transactions commit -// automatically when complete, and run independently from one another. -// -// To reduce lock contention, this execution strategy only acquires read locks -// on rows that match the WHERE clause of the statement. Additionally, the -// smaller per-partition transactions hold locks for less time. -// -// That said, Partitioned DML is not a drop-in replacement for standard DML used -// in ReadWrite transactions. -// -// - The DML statement must be fully-partitionable. Specifically, the statement -// must be expressible as the union of many statements which each access only -// a single row of the table. -// -// - The statement is not applied atomically to all rows of the table. Rather, -// the statement is applied atomically to partitions of the table, in -// independent transactions. Secondary index rows are updated atomically -// with the base table rows. 
-// -// - Partitioned DML does not guarantee exactly-once execution semantics -// against a partition. The statement will be applied at least once to each -// partition. It is strongly recommended that the DML statement should be -// idempotent to avoid unexpected results. For instance, it is potentially -// dangerous to run a statement such as -// `UPDATE table SET column = column + 1` as it could be run multiple times -// against some rows. -// -// - The partitions are committed automatically - there is no support for -// Commit or Rollback. If the call returns an error, or if the client issuing -// the ExecuteSql call dies, it is possible that some rows had the statement -// executed on them successfully. It is also possible that statement was -// never executed against other rows. -// -// - Partitioned DML transactions may only contain the execution of a single -// DML statement via ExecuteSql or ExecuteStreamingSql. -// -// - If any error is encountered during the execution of the partitioned DML -// operation (for instance, a UNIQUE INDEX violation, division by zero, or a -// value that cannot be stored due to schema constraints), then the -// operation is stopped at that point and an error is returned. It is -// possible that at this point, some partitions have been committed (or even -// committed multiple times), and other partitions have not been run at all. -// -// Given the above, Partitioned DML is good fit for large, database-wide, -// operations that are idempotent, such as deleting old rows from a very large -// table. +// Options to use for transactions. message TransactionOptions { // Message type to initiate a read-write transaction. Currently this // transaction type has no options. @@ -361,23 +38,26 @@ message TransactionOptions { enum ReadLockMode { // Default value. // - // * If isolation level is `REPEATABLE_READ`, then it is an error to - // specify `read_lock_mode`. Locking semantics default to `OPTIMISTIC`. 
- // No validation checks are done for reads, except for: + // * If isolation level is + // [REPEATABLE_READ][google.spanner.v1.TransactionOptions.IsolationLevel.REPEATABLE_READ], + // then it is an error to specify `read_lock_mode`. Locking semantics + // default to `OPTIMISTIC`. No validation checks are done for reads, + // except to validate that the data that was served at the snapshot time + // is unchanged at commit time in the following cases: // 1. reads done as part of queries that use `SELECT FOR UPDATE` // 2. reads done as part of statements with a `LOCK_SCANNED_RANGES` // hint // 3. reads done as part of DML statements - // to validate that the data that was served at the snapshot time is - // unchanged at commit time. // * At all other isolation levels, if `read_lock_mode` is the default - // value, then pessimistic read lock is used. + // value, then pessimistic read locks are used. READ_LOCK_MODE_UNSPECIFIED = 0; // Pessimistic lock mode. // // Read locks are acquired immediately on read. - // Semantics described only applies to `SERIALIZABLE` isolation. + // Semantics described only applies to + // [SERIALIZABLE][google.spanner.v1.TransactionOptions.IsolationLevel.SERIALIZABLE] + // isolation. PESSIMISTIC = 1; // Optimistic lock mode. @@ -385,7 +65,9 @@ message TransactionOptions { // Locks for reads within the transaction are not acquired on read. // Instead the locks are acquired on a commit to validate that // read/queried data has not changed since the transaction started. - // Semantics described only applies to `SERIALIZABLE` isolation. + // Semantics described only applies to + // [SERIALIZABLE][google.spanner.v1.TransactionOptions.IsolationLevel.SERIALIZABLE] + // isolation. OPTIMISTIC = 2; } @@ -395,8 +77,6 @@ message TransactionOptions { // Optional. Clients should pass the transaction ID of the previous // transaction attempt that was aborted if this transaction is being // executed on a multiplexed session. 
- // This feature is not yet supported and will result in an UNIMPLEMENTED - // error. bytes multiplexed_session_previous_transaction_id = 2 [(google.api.field_behavior) = OPTIONAL]; } @@ -442,7 +122,7 @@ message TransactionOptions { // Executes all reads at the given timestamp. Unlike other modes, // reads at a specific timestamp are repeatable; the same read at // the same timestamp always returns the same data. If the - // timestamp is in the future, the read will block until the + // timestamp is in the future, the read is blocked until the // specified timestamp, modulo the read's deadline. // // Useful for large scale consistent reads such as mapreduces, or @@ -491,9 +171,9 @@ message TransactionOptions { SERIALIZABLE = 1; // All reads performed during the transaction observe a consistent snapshot - // of the database, and the transaction will only successfully commit in the - // absence of conflicts between its updates and any concurrent updates that - // have occurred since that snapshot. Consequently, in contrast to + // of the database, and the transaction is only successfully committed in + // the absence of conflicts between its updates and any concurrent updates + // that have occurred since that snapshot. Consequently, in contrast to // `SERIALIZABLE` transactions, only write-write conflicts are detected in // snapshot transactions. // @@ -521,7 +201,7 @@ message TransactionOptions { // on the `session` resource. PartitionedDml partitioned_dml = 3; - // Transaction will not write. + // Transaction does not write. 
// // Authorization to begin a read-only transaction requires // `spanner.databases.beginReadOnlyTransaction` permission @@ -529,20 +209,24 @@ message TransactionOptions { ReadOnly read_only = 2; } - // When `exclude_txn_from_change_streams` is set to `true`: - // * Mutations from this transaction will not be recorded in change streams - // with DDL option `allow_txn_exclusion=true` that are tracking columns - // modified by these transactions. - // * Mutations from this transaction will be recorded in change streams with - // DDL option `allow_txn_exclusion=false or not set` that are tracking - // columns modified by these transactions. + // When `exclude_txn_from_change_streams` is set to `true`, it prevents read + // or write transactions from being tracked in change streams. + // + // * If the DDL option `allow_txn_exclusion` is set to `true`, then the + // updates + // made within this transaction aren't recorded in the change stream. + // + // * If you don't set the DDL option `allow_txn_exclusion` or if it's + // set to `false`, then the updates made within this transaction are + // recorded in the change stream. // // When `exclude_txn_from_change_streams` is set to `false` or not set, - // mutations from this transaction will be recorded in all change streams that - // are tracking columns modified by these transactions. - // `exclude_txn_from_change_streams` may only be specified for read-write or - // partitioned-dml transactions, otherwise the API will return an - // `INVALID_ARGUMENT` error. + // modifications from this transaction are recorded in all change streams + // that are tracking columns modified by these transactions. + // + // The `exclude_txn_from_change_streams` option can only be specified + // for read-write or partitioned DML transactions, otherwise the API returns + // an `INVALID_ARGUMENT` error. bool exclude_txn_from_change_streams = 5; // Isolation level for the transaction. 
@@ -569,15 +253,13 @@ message Transaction { // Example: `"2014-10-02T15:01:23.045123456Z"`. google.protobuf.Timestamp read_timestamp = 2; - // A precommit token will be included in the response of a BeginTransaction + // A precommit token is included in the response of a BeginTransaction // request if the read-write transaction is on a multiplexed session and // a mutation_key was specified in the // [BeginTransaction][google.spanner.v1.BeginTransactionRequest]. // The precommit token with the highest sequence number from this transaction // attempt should be passed to the [Commit][google.spanner.v1.Spanner.Commit] // request for this transaction. - // This feature is not yet supported and will result in an UNIMPLEMENTED - // error. MultiplexedSessionPrecommitToken precommit_token = 3; } @@ -609,8 +291,10 @@ message TransactionSelector { // When a read-write transaction is executed on a multiplexed session, // this precommit token is sent back to the client -// as a part of the [Transaction] message in the BeginTransaction response and -// also as a part of the [ResultSet] and [PartialResultSet] responses. +// as a part of the [Transaction][google.spanner.v1.Transaction] message in the +// [BeginTransaction][google.spanner.v1.BeginTransactionRequest] response and +// also as a part of the [ResultSet][google.spanner.v1.ResultSet] and +// [PartialResultSet][google.spanner.v1.PartialResultSet] responses. message MultiplexedSessionPrecommitToken { // Opaque precommit token. bytes precommit_token = 1;